From e07de44d427b9ad1fc15de336a082e85ec34456d Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 <80635572+YiscahLevySilas1@users.noreply.github.com> Date: Thu, 12 Oct 2023 18:40:45 +0300 Subject: [PATCH 001/195] Fix fixpath for controls C-0077 and C-0076 (#523) * SUB-2185 - improve C-0262 Signed-off-by: YiscahLevySilas1 * minor fix Signed-off-by: YiscahLevySilas1 * add [] to fixpath Signed-off-by: YiscahLevySilas1 * add [] to fixpath Signed-off-by: YiscahLevySilas1 * add [] to fixpath Signed-off-by: YiscahLevySilas1 --------- Signed-off-by: YiscahLevySilas1 --- rules/k8s-common-labels-usage/raw.rego | 6 +++--- rules/k8s-common-labels-usage/test/cronjob/expected.json | 2 +- rules/k8s-common-labels-usage/test/pod/expected.json | 2 +- .../test/workload-fail/expected.json | 2 +- rules/label-usage-for-resources/raw.rego | 6 +++--- rules/label-usage-for-resources/test/cronjob/expected.json | 4 ++-- rules/label-usage-for-resources/test/pod/expected.json | 2 +- .../test/workload-fail/expected.json | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/rules/k8s-common-labels-usage/raw.rego b/rules/k8s-common-labels-usage/raw.rego index 238b41216..7a6a29c7e 100644 --- a/rules/k8s-common-labels-usage/raw.rego +++ b/rules/k8s-common-labels-usage/raw.rego @@ -87,21 +87,21 @@ no_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{ no_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{ not wl.metadata.labels label_key := get_label_key("") - path = [{"path": sprintf("%vmetadata.labels.%v", [start_of_path, label_key]), "value": "YOUR_VALUE"}] + path = [{"path": sprintf("%vmetadata.labels[%v]", [start_of_path, label_key]), "value": "YOUR_VALUE"}] } no_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{ metadata := wl.metadata not metadata.labels label_key := get_label_key("") - path = [{"path": sprintf("%vmetadata.labels.%v", [start_of_path, label_key]), "value": "YOUR_VALUE"}] + path = [{"path": sprintf("%vmetadata.labels[%v]", [start_of_path, label_key]), "value": "YOUR_VALUE"}] } no_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{ labels := wl.metadata.labels not all_kubernetes_labels(labels) label_key := get_label_key("") - path = [{"path": sprintf("%vmetadata.labels.%v", [start_of_path, label_key]), "value": "YOUR_VALUE"}] + path = [{"path": sprintf("%vmetadata.labels[%v]", [start_of_path, label_key]), "value": "YOUR_VALUE"}] } all_kubernetes_labels(labels){ diff --git a/rules/k8s-common-labels-usage/test/cronjob/expected.json b/rules/k8s-common-labels-usage/test/cronjob/expected.json index 2f9d26829..39bd3724c 100644 --- a/rules/k8s-common-labels-usage/test/cronjob/expected.json +++ b/rules/k8s-common-labels-usage/test/cronjob/expected.json @@ -2,7 +2,7 @@ "alertMessage": "the following cronjobs the kubernetes common labels are not defined: hello", "failedPaths": [], "fixPaths": [{ - "path": "spec.jobTemplate.spec.template.metadata.labels.app.kubernetes.io/name", + "path": "spec.jobTemplate.spec.template.metadata.labels[app.kubernetes.io/name]", "value": "YOUR_VALUE" }], "ruleStatus": "", diff --git a/rules/k8s-common-labels-usage/test/pod/expected.json b/rules/k8s-common-labels-usage/test/pod/expected.json index 2a4cac865..ee876ef1b 100644 --- a/rules/k8s-common-labels-usage/test/pod/expected.json +++ b/rules/k8s-common-labels-usage/test/pod/expected.json @@ -2,7 +2,7 @@ "alertMessage": "in the following pod the kubernetes common labels are not defined: command-demo", "failedPaths": [], "fixPaths": [{ - "path": "metadata.labels.YOUR_LABEL", + "path": 
"metadata.labels[YOUR_LABEL]", "value": "YOUR_VALUE" }], "ruleStatus": "", diff --git a/rules/k8s-common-labels-usage/test/workload-fail/expected.json b/rules/k8s-common-labels-usage/test/workload-fail/expected.json index 3a98cdfa0..105929639 100644 --- a/rules/k8s-common-labels-usage/test/workload-fail/expected.json +++ b/rules/k8s-common-labels-usage/test/workload-fail/expected.json @@ -2,7 +2,7 @@ "alertMessage": "Deployment: kubernetes-dashboard the kubernetes common labels are is not defined:", "failedPaths": [], "fixPaths": [{ - "path": "spec.template.metadata.labels.app.kubernetes.io/name", + "path": "spec.template.metadata.labels[app.kubernetes.io/name]", "value": "YOUR_VALUE" }], "ruleStatus": "", diff --git a/rules/label-usage-for-resources/raw.rego b/rules/label-usage-for-resources/raw.rego index a8f8e82e8..06047c3b5 100644 --- a/rules/label-usage-for-resources/raw.rego +++ b/rules/label-usage-for-resources/raw.rego @@ -85,21 +85,21 @@ no_label_usage(wl, podSpec, beggining_of_pod_path) = path{ no_label_or_no_label_usage(wl, start_of_path) = path{ not wl.metadata label_key := get_label_key("") - path = [{"path": sprintf("%vmetadata.labels.%v", [start_of_path, label_key]), "value": "YOUR_VALUE"}] + path = [{"path": sprintf("%vmetadata.labels[%v]", [start_of_path, label_key]), "value": "YOUR_VALUE"}] } no_label_or_no_label_usage(wl, start_of_path) = path{ metadata := wl.metadata not metadata.labels label_key := get_label_key("") - path = [{"path": sprintf("%vmetadata.labels.%v", [start_of_path, label_key]), "value": "YOUR_VALUE"}] + path = [{"path": sprintf("%vmetadata.labels[%v]", [start_of_path, label_key]), "value": "YOUR_VALUE"}] } no_label_or_no_label_usage(wl, start_of_path) = path{ labels := wl.metadata.labels not is_desired_label(labels) label_key := get_label_key("") - path = [{"path": sprintf("%vmetadata.labels.%v", [start_of_path, label_key]), "value": "YOUR_VALUE"}] + path = [{"path": sprintf("%vmetadata.labels[%v]", [start_of_path, label_key]), "value": "YOUR_VALUE"}] } is_desired_label(labels) { diff --git a/rules/label-usage-for-resources/test/cronjob/expected.json b/rules/label-usage-for-resources/test/cronjob/expected.json index 595a928d3..8e24502e9 100644 --- a/rules/label-usage-for-resources/test/cronjob/expected.json +++ b/rules/label-usage-for-resources/test/cronjob/expected.json @@ -2,10 +2,10 @@ "alertMessage": "the following cronjobs a certain set of labels is not defined: hello", "failedPaths": [], "fixPaths": [{ - "path": "metadata.labels.YOUR_LABEL", + "path": "metadata.labels[YOUR_LABEL]", "value": "YOUR_VALUE" }, { - "path": "spec.jobTemplate.spec.template.metadata.labels.YOUR_LABEL", + "path": "spec.jobTemplate.spec.template.metadata.labels[YOUR_LABEL]", "value": "YOUR_VALUE" }], "ruleStatus": "", diff --git a/rules/label-usage-for-resources/test/pod/expected.json b/rules/label-usage-for-resources/test/pod/expected.json index ffcc45464..159053bb6 100644 --- a/rules/label-usage-for-resources/test/pod/expected.json +++ b/rules/label-usage-for-resources/test/pod/expected.json @@ -2,7 +2,7 @@ "alertMessage": "in the following pods a certain set of labels is not defined: command-demo", "failedPaths": [], "fixPaths": [{ - "path": "metadata.labels.app", + "path": "metadata.labels[app]", "value": "YOUR_VALUE" }], "ruleStatus": "", diff --git a/rules/label-usage-for-resources/test/workload-fail/expected.json b/rules/label-usage-for-resources/test/workload-fail/expected.json index dcf7acfeb..ff103d96a 100644 --- 
a/rules/label-usage-for-resources/test/workload-fail/expected.json +++ b/rules/label-usage-for-resources/test/workload-fail/expected.json @@ -2,7 +2,7 @@ "alertMessage": "Deployment: kubernetes-dashboard a certain set of labels is not defined:", "failedPaths": [], "fixPaths": [{ - "path": "spec.template.metadata.labels.app", + "path": "spec.template.metadata.labels[app]", "value": "YOUR_VALUE" }], "ruleStatus": "", From ef856b7692ae900b21af7f257689a9291b09cd9d Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 15 Oct 2023 14:22:55 +0300 Subject: [PATCH 002/195] add missing delete paths Signed-off-by: YiscahLevySilas1 --- rules/rule-credentials-configmap/raw.rego | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rules/rule-credentials-configmap/raw.rego b/rules/rule-credentials-configmap/raw.rego index 7486b62d2..71d21beda 100644 --- a/rules/rule-credentials-configmap/raw.rego +++ b/rules/rule-credentials-configmap/raw.rego @@ -49,6 +49,7 @@ deny[msga] { msga := { "alertMessage": sprintf("this configmap has sensitive information: %v", [configmap.metadata.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "fixPaths": [], "packagename": "armo_builtins", @@ -81,6 +82,7 @@ deny[msga] { msga := { "alertMessage": sprintf("this configmap has sensitive information: %v", [configmap.metadata.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "fixPaths": [], "packagename": "armo_builtins", From 28f6129873c7142e8bcc8316655c5bb9a80a9154 Mon Sep 17 00:00:00 2001 From: Ben Hirschberg <59160382+slashben@users.noreply.github.com> Date: Mon, 16 Oct 2023 15:22:46 +0300 Subject: [PATCH 003/195] Fix in exposure to internet rule: making sure that service and ingress are in the same namespace Signed-off-by: Ben Hirschberg <59160382+slashben@users.noreply.github.com> --- rules/exposure-to-internet/raw.rego | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/rules/exposure-to-internet/raw.rego b/rules/exposure-to-internet/raw.rego index e31379b8b..f059c6811 100644 --- a/rules/exposure-to-internet/raw.rego +++ b/rules/exposure-to-internet/raw.rego @@ -35,6 +35,10 @@ deny[msga] { svc := input[_] svc.kind == "Service" + + # Make sure that they belong to the same namespace + svc.metadata.namespace == ingress.metadata.namespace + # avoid duplicate alerts # if service is already exposed through NodePort or LoadBalancer workload will fail on that not is_exposed_service(svc) From f9fa144f5c7de8d7f1dcc82d62e6d2487dead343 Mon Sep 17 00:00:00 2001 From: Ben Hirschberg <59160382+slashben@users.noreply.github.com> Date: Mon, 16 Oct 2023 15:29:09 +0300 Subject: [PATCH 004/195] adding service to related objects Signed-off-by: Ben Hirschberg <59160382+slashben@users.noreply.github.com> --- rules/exposure-to-internet/raw.rego | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/rules/exposure-to-internet/raw.rego b/rules/exposure-to-internet/raw.rego index f059c6811..942e7e4e6 100644 --- a/rules/exposure-to-internet/raw.rego +++ b/rules/exposure-to-internet/raw.rego @@ -59,11 +59,16 @@ deny[msga] { "alertObject": { "k8sApiObjects": [wl] }, - "relatedObjects": [{ - "object": ingress, + "relatedObjects": [ + { + "object": ingress, "reviewPaths": result, - "failedPaths": result, - }] + "failedPaths": result, + }, + { + "object": svc, + } + ] } } From 9e388645fe3021c79cb591122ce6d693927b252c Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 17:00:14 +0300 Subject: [PATCH 005/195] fix test Signed-off-by: YiscahLevySilas1 --- 
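Note: this test update exercises the rule change from the two preceding commits, where rules/exposure-to-internet/raw.rego now only treats a Service as the backend of an Ingress when both objects live in the same namespace, and reports the Service as a related object. A minimal Rego sketch of that namespace guard, with object shapes assumed from the fixtures below (not the full rule):

package example

# A Service can only back an Ingress that lives in the same namespace.
# svc and ingress are assumed to be parsed Kubernetes objects with the
# usual metadata.namespace field.
same_namespace(svc, ingress) {
	svc.metadata.namespace == ingress.metadata.namespace
}

# Inputs mirroring the updated test fixtures (both in "default").
svc := {"kind": "Service", "metadata": {"name": "my-service", "namespace": "default"}}
ingress := {"kind": "Ingress", "metadata": {"name": "my-ingress", "namespace": "default"}}

# Succeeds for the pair above; fails once the namespaces differ.
pair_matches { same_namespace(svc, ingress) }
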
.../test/failed_with_ingress/expected.json | 25 ++++++++++++++++++- .../failed_with_ingress/input/ingress.yaml | 1 + .../failed_with_ingress/input/service.yaml | 1 + 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/rules/exposure-to-internet/test/failed_with_ingress/expected.json b/rules/exposure-to-internet/test/failed_with_ingress/expected.json index 958f9eaf0..5ce02f86b 100644 --- a/rules/exposure-to-internet/test/failed_with_ingress/expected.json +++ b/rules/exposure-to-internet/test/failed_with_ingress/expected.json @@ -23,7 +23,8 @@ "apiVersion": "networking.k8s.io/v1", "kind": "Ingress", "metadata": { - "name": "my-ingress" + "name": "my-ingress", + "namespace": "default" }, "spec": { "ingressClassName": "nginx", @@ -54,6 +55,28 @@ "spec.rules[0].http.paths[0].backend.service.name" ], "fixPaths": null + }, + { + "object": { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "name": "my-service", + "namespace": "default" + }, + "spec": { + "ports": [ + { + "port": 80, + "targetPort": 80 + } + ], + "selector": { + "app": "my-app" + }, + "type": "ClusterIP" + } + } } ] } diff --git a/rules/exposure-to-internet/test/failed_with_ingress/input/ingress.yaml b/rules/exposure-to-internet/test/failed_with_ingress/input/ingress.yaml index 096c24a22..4cc9b174d 100644 --- a/rules/exposure-to-internet/test/failed_with_ingress/input/ingress.yaml +++ b/rules/exposure-to-internet/test/failed_with_ingress/input/ingress.yaml @@ -2,6 +2,7 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: my-ingress + namespace: default spec: ingressClassName: nginx rules: diff --git a/rules/exposure-to-internet/test/failed_with_ingress/input/service.yaml b/rules/exposure-to-internet/test/failed_with_ingress/input/service.yaml index 7ba441575..9ad14d173 100644 --- a/rules/exposure-to-internet/test/failed_with_ingress/input/service.yaml +++ b/rules/exposure-to-internet/test/failed_with_ingress/input/service.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: Service metadata: name: my-service + namespace: default spec: selector: app: my-app From 303565815f80c6b896c35f2457439355305e6d63 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 17:00:52 +0300 Subject: [PATCH 006/195] change control scope Signed-off-by: YiscahLevySilas1 --- controls/C-0256-exposuretointernet.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/controls/C-0256-exposuretointernet.json b/controls/C-0256-exposuretointernet.json index cc35d4e7b..a65f5b3f5 100644 --- a/controls/C-0256-exposuretointernet.json +++ b/controls/C-0256-exposuretointernet.json @@ -28,8 +28,7 @@ "baseScore": 7.0, "scanningScope": { "matches": [ - "cluster", - "file" + "cluster" ] } } From c27b8d0f3561f419ac5270f004f2764e01bd50a7 Mon Sep 17 00:00:00 2001 From: rcohencyberarmor Date: Mon, 16 Oct 2023 17:02:05 +0300 Subject: [PATCH 007/195] cis-v1.23-t1.0.1 controls - move scope from cloud to cluster Signed-off-by: rcohencyberarmor --- ...ecificationfilepermissionsaresetto600ormorerestrictive.json | 2 +- ...eapiserverpodspecificationfileownershipissettorootroot.json | 2 +- ...ecificationfilepermissionsaresetto600ormorerestrictive.json | 2 +- ...lermanagerpodspecificationfileownershipissettorootroot.json | 2 +- ...ecificationfilepermissionsaresetto600ormorerestrictive.json | 2 +- ...eschedulerpodspecificationfileownershipissettorootroot.json | 2 +- ...ecificationfilepermissionsaresetto600ormorerestrictive.json | 2 +- ...hattheetcdpodspecificationfileownershipissettorootroot.json | 2 +- 
...rkinterfacefilepermissionsaresetto600ormorerestrictive.json | 2 +- ...econtainernetworkinterfacefileownershipissettorootroot.json | 2 +- ...cddatadirectorypermissionsaresetto700ormorerestrictive.json | 2 +- ...ensurethattheetcddatadirectoryownershipissettoetcdetcd.json | 2 +- ...-0104-ensurethattheadminconffilepermissionsaresetto600.json | 2 +- ...105-ensurethattheadminconffileownershipissettorootroot.json | 2 +- ...hedulerconffilepermissionsaresetto600ormorerestrictive.json | 2 +- ...ensurethattheschedulerconffileownershipissettorootroot.json | 2 +- ...managerconffilepermissionsaresetto600ormorerestrictive.json | 2 +- ...atthecontrollermanagerconffileownershipissettorootroot.json | 2 +- ...ekubernetespkidirectoryandfileownershipissettorootroot.json | 2 +- ...certificatefilepermissionsaresetto600ormorerestrictive.json | 2 +- ...nsurethatthekubernetespkikeyfilepermissionsaresetto600.json | 2 +- ...nsurethattheapiserveranonymousauthargumentissettofalse.json | 2 +- ...4-ensurethattheapiservertokenauthfileparameterisnotset.json | 2 +- ...5-ensurethattheapiserverdenyserviceexternalipsisnotset.json | 2 +- ...ificateandkubeletclientkeyargumentsaresetasappropriate.json | 2 +- ...rkubeletcertificateauthorityargumentissetasappropriate.json | 2 +- ...piserverauthorizationmodeargumentisnotsettoalwaysallow.json | 2 +- ...ethattheapiserverauthorizationmodeargumentincludesnode.json | 2 +- ...ethattheapiserverauthorizationmodeargumentincludesrbac.json | 2 +- ...ensurethattheadmissioncontrolplugineventratelimitisset.json | 2 +- ...ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json | 2 +- ...surethattheadmissioncontrolpluginalwayspullimagesisset.json | 2 +- ...insecuritycontextdenyissetifpodsecuritypolicyisnotused.json | 2 +- ...ensurethattheadmissioncontrolpluginserviceaccountisset.json | 2 +- ...rethattheadmissioncontrolpluginnamespacelifecycleisset.json | 2 +- ...nsurethattheadmissioncontrolpluginnoderestrictionisset.json | 2 +- ...28-ensurethattheapiserversecureportargumentisnotsetto0.json | 2 +- ...29-ensurethattheapiserverprofilingargumentissettofalse.json | 2 +- ...C-0130-ensurethattheapiserverauditlogpathargumentisset.json | 2 +- ...piserverauditlogmaxageargumentissetto30orasappropriate.json | 2 +- ...erverauditlogmaxbackupargumentissetto10orasappropriate.json | 2 +- ...serverauditlogmaxsizeargumentissetto100orasappropriate.json | 2 +- ...attheapiserverrequesttimeoutargumentissetasappropriate.json | 2 +- ...hattheapiserverserviceaccountlookupargumentissettotrue.json | 2 +- ...iserverserviceaccountkeyfileargumentissetasappropriate.json | 2 +- ...etcdcertfileandetcdkeyfileargumentsaresetasappropriate.json | 2 +- ...rtfileandtlsprivatekeyfileargumentsaresetasappropriate.json | 2 +- ...thattheapiserverclientcafileargumentissetasappropriate.json | 2 +- ...rethattheapiserveretcdcafileargumentissetasappropriate.json | 2 +- ...rverencryptionproviderconfigargumentissetasappropriate.json | 2 +- ...nsurethatencryptionprovidersareappropriatelyconfigured.json | 2 +- ...attheapiserveronlymakesuseofstrongcryptographicciphers.json | 2 +- ...agerterminatedpodgcthresholdargumentissetasappropriate.json | 2 +- ...ethatthecontrollermanagerprofilingargumentissettofalse.json | 2 +- ...manageruseserviceaccountcredentialsargumentissettotrue.json | 2 +- ...serviceaccountprivatekeyfileargumentissetasappropriate.json | 2 +- ...econtrollermanagerrootcafileargumentissetasappropriate.json | 2 +- ...nagerrotatekubeletservercertificateargumentissettotrue.json | 2 +- 
...atthecontrollermanagerbindaddressargumentissetto127001.json | 2 +- ...51-ensurethattheschedulerprofilingargumentissettofalse.json | 2 +- ...ensurethattheschedulerbindaddressargumentissetto127001.json | 2 +- ...ethatthecertfileandkeyfileargumentsaresetasappropriate.json | 2 +- .../C-0154-ensurethattheclientcertauthargumentissettotrue.json | 2 +- .../C-0155-ensurethattheautotlsargumentisnotsettotrue.json | 2 +- ...peercertfileandpeerkeyfileargumentsaresetasappropriate.json | 2 +- ...157-ensurethatthepeerclientcertauthargumentissettotrue.json | 2 +- .../C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json | 2 +- ...159-ensurethatauniquecertificateauthorityisusedforetcd.json | 2 +- controls/C-0160-ensurethataminimalauditpolicyiscreated.json | 2 +- ...0161-ensurethattheauditpolicycoverskeysecurityconcerns.json | 2 +- ...eletservicefilepermissionsaresetto600ormorerestrictive.json | 2 +- ...nsurethatthekubeletservicefileownershipissettorootroot.json | 2 +- ...ileexistsensurepermissionsaresetto600ormorerestrictive.json | 2 +- ...roxykubeconfigfileexistsensureownershipissettorootroot.json | 2 +- ...kubeletconffilepermissionsaresetto600ormorerestrictive.json | 2 +- ...atthekubeconfigkubeletconffileownershipissettorootroot.json | 2 +- ...authoritiesfilepermissionsaresetto600ormorerestrictive.json | 2 +- ...ientcertificateauthoritiesfileownershipissettorootroot.json | 2 +- ...sbeingusedvalidatepermissionssetto600ormorerestrictive.json | 2 +- ...ionfileisbeingusedvalidatefileownershipissettorootroot.json | 2 +- .../C-0172-ensurethattheanonymousauthargumentissettofalse.json | 2 +- ...ethattheauthorizationmodeargumentisnotsettoalwaysallow.json | 2 +- ...74-ensurethattheclientcafileargumentissetasappropriate.json | 2 +- controls/C-0175-verifythatthereadonlyportargumentissetto0.json | 2 +- ...atthestreamingconnectionidletimeoutargumentisnotsetto0.json | 2 +- ...-ensurethattheprotectkerneldefaultsargumentissettotrue.json | 2 +- ...ensurethatthemakeiptablesutilchainsargumentissettotrue.json | 2 +- .../C-0179-ensurethatthehostnameoverrideargumentisnotset.json | 2 +- ...entissetto0oralevelwhichensuresappropriateeventcapture.json | 2 +- ...rtfileandtlsprivatekeyfileargumentsaresetasappropriate.json | 2 +- ...ensurethattherotatecertificatesargumentisnotsettofalse.json | 2 +- ...attherotatekubeletservercertificateargumentissettotrue.json | 2 +- ...thatthekubeletonlymakesuseofstrongcryptographicciphers.json | 2 +- ...5-ensurethattheclusteradminroleisonlyusedwhererequired.json | 2 +- controls/C-0186-minimizeaccesstosecrets.json | 2 +- controls/C-0187-minimizewildcarduseinrolesandclusterroles.json | 3 +-- controls/C-0188-minimizeaccesstocreatepods.json | 2 +- ...189-ensurethatdefaultserviceaccountsarenotactivelyused.json | 2 +- ...rethatserviceaccounttokensareonlymountedwherenecessary.json | 2 +- ...mpersonateandescalatepermissionsinthekubernetescluster.json | 2 +- ...lusterhasatleastoneactivepolicycontrolmechanisminplace.json | 2 +- .../C-0193-minimizetheadmissionofprivilegedcontainers.json | 2 +- ...ionofcontainerswishingtosharethehostprocessidnamespace.json | 2 +- ...admissionofcontainerswishingtosharethehostipcnamespace.json | 2 +- ...ssionofcontainerswishingtosharethehostnetworknamespace.json | 2 +- ...zetheadmissionofcontainerswithallowprivilegeescalation.json | 2 +- controls/C-0198-minimizetheadmissionofrootcontainers.json | 2 +- ...nimizetheadmissionofcontainerswiththenet_rawcapability.json | 2 +- ...-minimizetheadmissionofcontainerswithaddedcapabilities.json | 2 +- 
...nimizetheadmissionofcontainerswithcapabilitiesassigned.json | 2 +- ...202-minimizetheadmissionofwindowshostprocesscontainers.json | 2 +- controls/C-0203-minimizetheadmissionofhostpathvolumes.json | 2 +- ...0204-minimizetheadmissionofcontainerswhichusehostports.json | 2 +- .../C-0205-ensurethatthecniinusesupportsnetworkpolicies.json | 2 +- ...0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json | 2 +- ...erusingsecretsasfilesoversecretsasenvironmentvariables.json | 2 +- controls/C-0208-considerexternalsecretstorage.json | 2 +- ...dministrativeboundariesbetweenresourcesusingnamespaces.json | 2 +- ...seccompprofileissettodockerdefaultinyourpoddefinitions.json | 2 +- .../C-0211-applysecuritycontexttoyourpodsandcontainers.json | 2 +- controls/C-0212-thedefaultnamespaceshouldnotbeused.json | 2 +- 121 files changed, 121 insertions(+), 122 deletions(-) diff --git a/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json index de61a4dd3..83585738c 100644 --- a/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json b/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json index c45ecef4c..2dbe3d2b4 100644 --- a/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json index fea592b35..eb1d15e95 100644 --- a/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json b/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json index eda16e975..e02de4b08 100644 --- a/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json index 85eaab575..b5581de00 100644 --- a/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ 
b/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json b/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json index 0b1b17149..0b2c5aa11 100644 --- a/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json index 424fe3f15..183e05409 100644 --- a/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json b/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json index 1e0271620..b9bda38cc 100644 --- a/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json index 8201304fb..d7ad41f35 100644 --- a/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json b/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json index 5bf97b155..6af932e5f 100644 --- a/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json +++ b/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json b/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json index 5e86b3f87..3c960b0c4 100644 --- a/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json +++ b/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json 
b/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json index 2b6c363b1..a314344f8 100644 --- a/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json +++ b/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json b/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json index d3354f989..9a02cc1ab 100644 --- a/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json +++ b/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json b/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json index 85a025e80..b4a0c6800 100644 --- a/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json +++ b/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json index 019a12916..ee0881b49 100644 --- a/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json b/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json index bedbaa4cf..ec2f43577 100644 --- a/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json +++ b/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json index d10e9dc58..0908a7c2d 100644 --- a/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json b/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json index 1f955425f..cd6dbd8f9 100644 --- a/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json +++ b/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json 
b/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json index 0b5d14501..0848e6b5f 100644 --- a/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json +++ b/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json index f8ef841a2..35d726979 100644 --- a/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json b/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json index e70d4f9cb..e55dd8fd4 100644 --- a/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json +++ b/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json b/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json index 0f3b394de..2c55415c8 100644 --- a/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json +++ b/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json b/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json index 80e827ab5..dde0314d9 100644 --- a/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json +++ b/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json b/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json index 8a052258b..fca74ebde 100644 --- a/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json +++ b/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json b/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json index 903f4b6a2..908414850 100644 --- a/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json +++ b/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git 
a/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json b/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json index a8d7beb37..350c03104 100644 --- a/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json +++ b/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json b/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json index d1e466ede..e950ca3a5 100644 --- a/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json +++ b/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json b/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json index b4d59d6f4..5cb48553d 100644 --- a/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json +++ b/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json b/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json index 794a47b24..200341870 100644 --- a/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json +++ b/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json b/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json index 8ece62802..9e50895bf 100644 --- a/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json +++ b/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json b/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json index 8a01ffe7f..5a4e284ee 100644 --- a/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json +++ b/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json b/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json index 939546258..ff57e1385 100644 --- a/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json +++ b/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json @@ -22,7 +22,7 @@ "default_value": "By default, `AlwaysPullImages` is not set.", "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file 
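The hunks in this patch all make the same substitution: each CIS control's scanningScope.matches list changes from "cloud" to "cluster", so these checks are matched against cluster scans rather than cloud-provider scans. A hypothetical Rego sketch of how a consumer of these control files could gate on that field (the predicate and inputs are illustrative assumptions, not Kubescape's actual scanner code):

package example

# Illustrative only: a control is in scope when its scanningScope.matches
# list contains the current scan target ("cluster", "file", ...).
control_in_scope(control, scan_target) {
	control.scanningScope.matches[_] == scan_target
}

# A control shaped like the patched files above, after the change.
example_control := {"controlID": "C-0123", "scanningScope": {"matches": ["cluster"]}}

# Selected for cluster scans, skipped when scanning only cloud resources.
in_cluster_scan { control_in_scope(example_control, "cluster") }
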
diff --git a/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json b/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json index b7dd643e0..71801308b 100644 --- a/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json +++ b/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json b/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json index f323a790e..4887e8abd 100644 --- a/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json +++ b/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json b/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json index 1f0035541..4ed20d813 100644 --- a/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json +++ b/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json b/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json index bf4d32232..08815eafa 100644 --- a/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json +++ b/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json b/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json index 975b4b4ec..9f9712d53 100644 --- a/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json +++ b/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json b/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json index 5481d1360..39d1ac300 100644 --- a/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json +++ b/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json b/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json index a7e8a10c0..d46ce48d8 100644 --- a/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json +++ b/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json 
b/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json index 64b60bdb5..aaad447eb 100644 --- a/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json +++ b/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json b/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json index 38f0c5b2c..dcd60ae44 100644 --- a/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json +++ b/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json b/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json index b2686ff52..5dcb8fc50 100644 --- a/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json +++ b/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json b/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json index 20d462c2d..ace65d7c3 100644 --- a/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json +++ b/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json b/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json index 1f041716d..7a62a5a5c 100644 --- a/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json +++ b/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json b/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json index fb00096f2..a45bd2882 100644 --- a/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json +++ b/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json b/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json index 1a3c586c6..f9bf27741 100644 --- a/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ 
- "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json b/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json index a367ccfc2..512dcd8cf 100644 --- a/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json +++ b/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json b/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json index a9ae1a17d..f76647d74 100644 --- a/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json +++ b/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json b/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json index e48543946..90f6ce961 100644 --- a/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json +++ b/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json b/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json index d8cd51e3d..ecd759843 100644 --- a/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json +++ b/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json b/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json index 5a16fcf5a..a3ecf1a73 100644 --- a/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json +++ b/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json b/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json index 640fc5bea..f7133e991 100644 --- a/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json +++ b/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json b/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json index 91a3285e9..79a973396 100644 --- a/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json +++ 
b/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json b/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json index d9ac69d3e..a7beb6144 100644 --- a/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json +++ b/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json b/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json index 7a38fc012..83edbab75 100644 --- a/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json +++ b/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json @@ -22,7 +22,7 @@ "default_value": "By default, `--use-service-account-credentials` is set to false.", "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json b/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json index 7c1ef253c..d663d0013 100644 --- a/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json +++ b/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json b/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json index 39af5c01d..d2a44513e 100644 --- a/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json +++ b/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json b/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json index 8d0ba98ff..3f4a4c173 100644 --- a/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json +++ b/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json b/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json index f0b3593a1..666237258 100644 --- a/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json +++ b/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file 
diff --git a/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json b/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json index 6b5492f05..ad3b207f1 100644 --- a/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json +++ b/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json b/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json index bcb0a2a07..f4498708c 100644 --- a/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json +++ b/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json b/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json index aea7b7b7a..59da2a849 100644 --- a/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json b/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json index bd2c0c71f..ae94607fd 100644 --- a/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json +++ b/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json b/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json index ba2bfb209..14cbae1c1 100644 --- a/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json +++ b/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json b/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json index 28dd920dc..82c54ff33 100644 --- a/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json @@ -22,7 +22,7 @@ "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json b/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json index a3cc8d04c..72d0072c3 100644 --- a/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json +++ b/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json @@ -22,7 +22,7 @@ "default_value": "**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json b/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json index 816dfd2a8..6c5711651 100644 --- a/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json +++ b/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json b/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json index d9067e17e..cc1a39a13 100644 --- a/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json +++ b/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0160-ensurethataminimalauditpolicyiscreated.json b/controls/C-0160-ensurethataminimalauditpolicyiscreated.json index 302d80645..9aaf518fa 100644 --- a/controls/C-0160-ensurethataminimalauditpolicyiscreated.json +++ b/controls/C-0160-ensurethataminimalauditpolicyiscreated.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json b/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json index 507ecf299..116fd1974 100644 --- a/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json +++ b/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json index 2f3de829d..5ae6b9c7b 100644 --- a/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json b/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json index a4002c340..4c9b2dfd1 100644 --- a/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json +++ b/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json b/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json index 242f4e5e4..c5ba8caa6 100644 --- a/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } 
} \ No newline at end of file diff --git a/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json b/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json index 2ba57e278..440285c4a 100644 --- a/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json +++ b/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json index 13d89aea8..5f1a11289 100644 --- a/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json b/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json index 08888e347..e3f6b972e 100644 --- a/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json +++ b/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json index ea9ff5a30..e7fd6c874 100644 --- a/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json b/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json index 0262db91d..f02c6fee6 100644 --- a/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json +++ b/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json b/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json index b5440909a..1935aa234 100644 --- a/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json +++ b/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json 
b/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json index 72b032332..f5747f9dd 100644 --- a/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json +++ b/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json b/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json index 1eacc7167..418ef34f6 100644 --- a/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json +++ b/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json b/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json index 510cc12cb..5315e42a0 100644 --- a/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json +++ b/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json b/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json index 099b906a3..e629a726f 100644 --- a/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json +++ b/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0175-verifythatthereadonlyportargumentissetto0.json b/controls/C-0175-verifythatthereadonlyportargumentissetto0.json index 651169e18..47d7617a2 100644 --- a/controls/C-0175-verifythatthereadonlyportargumentissetto0.json +++ b/controls/C-0175-verifythatthereadonlyportargumentissetto0.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json b/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json index 6073fdfde..d5f6d5fd5 100644 --- a/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json +++ b/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json b/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json index 28313cffd..fc5aad85c 100644 --- a/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json +++ b/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json b/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json index 507ab2c14..5872494fa 100644 --- 
a/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json +++ b/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json b/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json index c7db62e8a..68573eb20 100644 --- a/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json +++ b/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json b/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json index 6875418e8..edb7998f3 100644 --- a/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json +++ b/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json b/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json index e6585c203..41004e0c8 100644 --- a/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json +++ b/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json b/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json index f2f4e10a3..ac1e8f2df 100644 --- a/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json +++ b/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json b/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json index c9c33f4a1..a2e3eeb54 100644 --- a/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json +++ b/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json b/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json index 867aa5ff4..1a858e92f 100644 --- a/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json +++ b/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json b/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json index 27dc6fd3c..e591e9553 100644 --- 
a/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json +++ b/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0186-minimizeaccesstosecrets.json b/controls/C-0186-minimizeaccesstosecrets.json index 2717be8c0..27caec6c8 100644 --- a/controls/C-0186-minimizeaccesstosecrets.json +++ b/controls/C-0186-minimizeaccesstosecrets.json @@ -23,7 +23,7 @@ "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json b/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json index 1a09b3e9c..df7589ece 100644 --- a/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json +++ b/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json @@ -23,8 +23,7 @@ }, "scanningScope": { "matches": [ - "cluster", - "file" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0188-minimizeaccesstocreatepods.json b/controls/C-0188-minimizeaccesstocreatepods.json index 9894f0f6f..e88f6bfff 100644 --- a/controls/C-0188-minimizeaccesstocreatepods.json +++ b/controls/C-0188-minimizeaccesstocreatepods.json @@ -23,7 +23,7 @@ "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json b/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json index 18d01d325..94bfb33f7 100644 --- a/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json +++ b/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json @@ -24,7 +24,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git 
a/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json b/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json index e442061f7..14609b37b 100644 --- a/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json +++ b/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json b/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json index f63300f60..9583d658c 100644 --- a/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json +++ b/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json b/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json index 751567761..9e455bd40 100644 --- a/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json +++ b/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json @@ -24,7 +24,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json b/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json index f26d41490..20b34595f 100644 --- a/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json +++ b/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json b/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json index 2a0bd2cae..622a4f79b 100644 --- a/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json +++ b/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json b/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json index 6f39f2806..ca8366d23 100644 --- a/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json +++ b/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json b/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json index 8113cb719..9fd2f5389 100644 --- a/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json +++ b/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No 
newline at end of file diff --git a/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json b/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json index 1e632ee04..159116541 100644 --- a/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json +++ b/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0198-minimizetheadmissionofrootcontainers.json b/controls/C-0198-minimizetheadmissionofrootcontainers.json index b014fe9c6..069ff1e80 100644 --- a/controls/C-0198-minimizetheadmissionofrootcontainers.json +++ b/controls/C-0198-minimizetheadmissionofrootcontainers.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json b/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json index b7cd4a991..30f8d3640 100644 --- a/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json +++ b/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json b/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json index eb0ec059d..851624db0 100644 --- a/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json +++ b/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json b/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json index 5837b34cc..d90d4800a 100644 --- a/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json +++ b/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json b/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json index a405a144e..7f77bd3de 100644 --- a/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json +++ b/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0203-minimizetheadmissionofhostpathvolumes.json b/controls/C-0203-minimizetheadmissionofhostpathvolumes.json index 08d155323..c6e32149a 100644 --- a/controls/C-0203-minimizetheadmissionofhostpathvolumes.json +++ b/controls/C-0203-minimizetheadmissionofhostpathvolumes.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json b/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json index 15962e85a..3fd35c822 100644 --- a/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json +++ 
b/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json b/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json index 807f396d8..e63a4cc81 100644 --- a/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json +++ b/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json @@ -22,7 +22,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json b/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json index 153d5c606..4a5098c85 100644 --- a/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json +++ b/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json b/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json index 083ee8d02..133eeb89e 100644 --- a/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json +++ b/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json @@ -26,7 +26,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0208-considerexternalsecretstorage.json b/controls/C-0208-considerexternalsecretstorage.json index c97d5293c..531a1fcf5 100644 --- a/controls/C-0208-considerexternalsecretstorage.json +++ b/controls/C-0208-considerexternalsecretstorage.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json b/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json index 9b16df013..03de1615e 100644 --- a/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json +++ b/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json @@ -23,7 +23,7 @@ "default_value": "By default, Kubernetes starts with two initial namespaces: 1. `default` - The default namespace for objects with no other namespace2. `kube-system` - The namespace for objects created by the Kubernetes system3. `kube-node-lease` - Namespace used for node heartbeats4. 
`kube-public` - Namespace used for public information in a cluster", "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json b/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json index d854fc293..61ef46bfc 100644 --- a/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json +++ b/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json @@ -23,7 +23,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json index 700f9c67f..e1ec5c70d 100644 --- a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json +++ b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json @@ -45,7 +45,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0212-thedefaultnamespaceshouldnotbeused.json b/controls/C-0212-thedefaultnamespaceshouldnotbeused.json index d5882df6d..92d0cb6b2 100644 --- a/controls/C-0212-thedefaultnamespaceshouldnotbeused.json +++ b/controls/C-0212-thedefaultnamespaceshouldnotbeused.json @@ -39,7 +39,7 @@ }, "scanningScope": { "matches": [ - "cloud" + "cluster" ] } } \ No newline at end of file From cee627f7d7cf7a737c4d0933f4a263668fae64a1 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 18:33:07 +0300 Subject: [PATCH 008/195] fix control C-0004 Signed-off-by: YiscahLevySilas1 --- .../raw.rego | 117 ++++++++++++------ .../test/cronjob/expected.json | 22 +++- .../test/pod-only-limits/expected.json | 20 +++ .../test/pod-only-limits/input/pod.yaml | 23 ++++ .../test/pod-only-requests/expected.json | 20 +++ .../test/pod-only-requests/input/pod.yaml | 23 ++++ .../test/pod/expected.json | 69 +++++++---- .../test/workload/expected.json | 25 +++- 8 files changed, 259 insertions(+), 60 deletions(-) create mode 100644 rules/resources-memory-limit-and-request/test/pod-only-limits/expected.json create mode 100644 rules/resources-memory-limit-and-request/test/pod-only-limits/input/pod.yaml create mode 100644 rules/resources-memory-limit-and-request/test/pod-only-requests/expected.json create mode 100644 rules/resources-memory-limit-and-request/test/pod-only-requests/input/pod.yaml diff --git a/rules/resources-memory-limit-and-request/raw.rego b/rules/resources-memory-limit-and-request/raw.rego index cf1c9f289..a82209c01 100644 --- a/rules/resources-memory-limit-and-request/raw.rego +++ b/rules/resources-memory-limit-and-request/raw.rego @@ -1,15 +1,13 @@ package armo_builtins -# Fails if pod does not have container with memory-limit or request +# ================================== memory limits ================================== +# Fails if pod does not have container with memory-limits deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] - not request_or_limit_memory(container) - fixPaths := [ - {"path": sprintf("spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - ] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), 
"value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v does not have memory-limit or request", [container.name]), @@ -21,17 +19,14 @@ deny[msga] { } } -# Fails if workload does not have container with memory-limit or request +# Fails if workload does not have container with memory-limits deny[msga] { wl := input[_] spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] - not request_or_limit_memory(container) - fixPaths := [ - {"path": sprintf("spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - ] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), @@ -43,16 +38,69 @@ deny[msga] { } } -# Fails if cronjob does not have container with memory-limit or request +# Fails if cronjob does not have container with memory-limits deny[msga] { wl := input[_] wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] - not request_or_limit_memory(container) - fixPaths := [ - {"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - ] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +# ================================== memory requests ================================== +# Fails if pod does not have container with memory requests +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v does not have memory-limit or request", [container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [pod]}, + } +} + +# Fails if workload does not have container with memory requests +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, 
wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +# Fails if cronjob does not have container with memory requests +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), @@ -148,9 +196,7 @@ is_min_max_exceeded_memory(container) = "resources.limits.memory" { } else = "resources.requests.memory" { memory_req := container.resources.requests.memory is_req_exceeded_memory(memory_req) -} else = "" { - true -} +} else = "" is_limit_exceeded_memory(memory_limit) { is_min_limit_exceeded_memory(memory_limit) @@ -171,7 +217,7 @@ is_req_exceeded_memory(memory_req) { # helpers is_max_limit_exceeded_memory(memory_limit) { - memory_limit_max :=data.postureControlInputs.memory_limit_max[_] + memory_limit_max := data.postureControlInputs.memory_limit_max[_] compare_max(memory_limit_max, memory_limit) } @@ -197,24 +243,24 @@ is_min_request_exceeded_memory(memory_req) { compare_max(max, given) { endswith(max, "Mi") endswith(given, "Mi") - split_max := split(max, "Mi")[0] - split_given := split(given, "Mi")[0] + split_max := split(max, "Mi")[0] + split_given := split(given, "Mi")[0] split_given > split_max } compare_max(max, given) { endswith(max, "M") endswith(given, "M") - split_max := split(max, "M")[0] - split_given := split(given, "M")[0] + split_max := split(max, "M")[0] + split_given := split(given, "M")[0] split_given > split_max } compare_max(max, given) { endswith(max, "m") endswith(given, "m") - split_max := split(max, "m")[0] - split_given := split(given, "m")[0] + split_max := split(max, "m")[0] + split_given := split(given, "m")[0] split_given > split_max } @@ -224,31 +270,29 @@ compare_max(max, given) { given > max } - - ################ # Compare according to unit - min compare_min(min, given) { endswith(min, "Mi") endswith(given, "Mi") - split_min := split(min, "Mi")[0] - split_given := split(given, "Mi")[0] + split_min := split(min, "Mi")[0] + split_given := split(given, "Mi")[0] split_given < split_min } compare_min(min, given) { endswith(min, "M") endswith(given, "M") - split_min := split(min, "M")[0] - split_given := split(given, "M")[0] + split_min := split(min, "M")[0] + split_given := split(given, "M")[0] split_given < split_min } compare_min(min, given) { endswith(min, "m") endswith(given, "m") - split_min := split(min, "m")[0] - split_given := split(given, "m")[0] + split_min := split(min, "m")[0] + split_given := split(given, "m")[0] split_given < split_min } @@ -258,7 +302,6 @@ compare_min(min, given) { given < min } - # Check that is same unit is_special_measure(unit) { endswith(unit, "m") diff --git a/rules/resources-memory-limit-and-request/test/cronjob/expected.json b/rules/resources-memory-limit-and-request/test/cronjob/expected.json index 7ca8b207e..3444a7588 100644 --- a/rules/resources-memory-limit-and-request/test/cronjob/expected.json +++ b/rules/resources-memory-limit-and-request/test/cronjob/expected.json @@ -6,7 +6,27 @@ { "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.limits.memory", "value": "YOUR_VALUE" - }, + } + 
], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + }, + { + "alertMessage": "Container: hello in CronJob: hello does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ { "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.requests.memory", "value": "YOUR_VALUE" diff --git a/rules/resources-memory-limit-and-request/test/pod-only-limits/expected.json b/rules/resources-memory-limit-and-request/test/pod-only-limits/expected.json new file mode 100644 index 000000000..6b6a0addf --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod-only-limits/expected.json @@ -0,0 +1,20 @@ +[{ + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [{ + "path": "spec.containers[1].resources.limits.memory", + "value": "YOUR_VALUE" + }], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + }] + } +}] \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/pod-only-limits/input/pod.yaml b/rules/resources-memory-limit-and-request/test/pod-only-limits/input/pod.yaml new file mode 100644 index 000000000..7774dea5f --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod-only-limits/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + cpu: "500m" diff --git a/rules/resources-memory-limit-and-request/test/pod-only-requests/expected.json b/rules/resources-memory-limit-and-request/test/pod-only-requests/expected.json new file mode 100644 index 000000000..4648d72fc --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod-only-requests/expected.json @@ -0,0 +1,20 @@ +[{ + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [{ + "path": "spec.containers[1].resources.requests.memory", + "value": "YOUR_VALUE" + }], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + }] + } +}] \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/pod-only-requests/input/pod.yaml b/rules/resources-memory-limit-and-request/test/pod-only-requests/input/pod.yaml new file mode 100644 index 000000000..d146d134a --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod-only-requests/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" diff --git 
a/rules/resources-memory-limit-and-request/test/pod/expected.json b/rules/resources-memory-limit-and-request/test/pod/expected.json index 67802dc0e..7521c44a2 100644 --- a/rules/resources-memory-limit-and-request/test/pod/expected.json +++ b/rules/resources-memory-limit-and-request/test/pod/expected.json @@ -1,23 +1,50 @@ -[{ - "alertMessage": "Container: log-aggregator does not have memory-limit or request", - "failedPaths": [], - "fixPaths": [{ - "path": "spec.containers[1].resources.limits.memory", - "value": "YOUR_VALUE" - }, { - "path": "spec.containers[1].resources.requests.memory", - "value": "YOUR_VALUE" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "frontend" +[ + { + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.limits.memory", + "value": "YOUR_VALUE" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + }, + { + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.requests.memory", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/workload/expected.json b/rules/resources-memory-limit-and-request/test/workload/expected.json index 123dc187f..35d582692 100644 --- a/rules/resources-memory-limit-and-request/test/workload/expected.json +++ b/rules/resources-memory-limit-and-request/test/workload/expected.json @@ -6,7 +6,30 @@ { "path": "spec.template.spec.containers[0].resources.limits.memory", "value": "YOUR_VALUE" - }, + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + }, + { + "alertMessage": "Container: app in Deployment: test does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ { "path": "spec.template.spec.containers[0].resources.requests.memory", "value": "YOUR_VALUE" From 26edf26188af984c46d013355213ea3f847c7126 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 18:59:29 +0300 Subject: [PATCH 009/195] fix C-0050 Signed-off-by: YiscahLevySilas1 --- .../resources-cpu-limit-and-request/raw.rego | 86 ++++++++++++++++--- .../test/cronjob/expected.json | 32 ++++++- .../test/pod-only-limits/expected.json | 22 +++++ .../test/pod-only-limits/input/pod.yaml | 23 +++++ .../test/pod-only-requests/expected.json | 21 +++++ .../test/pod-only-requests/input/pod.yaml | 23 +++++ .../test/pod/expected.json | 32 ++++++- .../test/workload/expected.json | 78 +++++++++++------ 8 files changed, 277 insertions(+), 40 deletions(-) create mode 100644 rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json create mode 100644 
rules/resources-cpu-limit-and-request/test/pod-only-limits/input/pod.yaml create mode 100644 rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json create mode 100644 rules/resources-cpu-limit-and-request/test/pod-only-requests/input/pod.yaml diff --git a/rules/resources-cpu-limit-and-request/raw.rego b/rules/resources-cpu-limit-and-request/raw.rego index 317be212e..07f136ec4 100644 --- a/rules/resources-cpu-limit-and-request/raw.rego +++ b/rules/resources-cpu-limit-and-request/raw.rego @@ -1,14 +1,14 @@ package armo_builtins -# Fails if pod does not have container with CPU-limit or request +# ==================================== CPU requests ============================================= +# Fails if pod does not have container with CPU request deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] - not request_or_limit_cpu(container) + not container.resources.requests.cpu - fixPaths := [{"path": sprintf("spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + fixPaths := [{"path": sprintf("spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v does not have CPU-limit or request", [ container.name]), @@ -22,16 +22,15 @@ deny[msga] { } } -# Fails if workload does not have container with CPU-limit or request +# Fails if workload does not have container with CPU requests deny[msga] { wl := input[_] spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] - not request_or_limit_cpu(container) + not container.resources.requests.cpu - fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), @@ -45,15 +44,14 @@ deny[msga] { } } -# Fails if cronjob does not have container with CPU-limit or request +# Fails if cronjob does not have container with CPU requests deny[msga] { wl := input[_] wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] - not request_or_limit_cpu(container) + not container.resources.requests.cpu - fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), @@ -67,6 +65,70 @@ deny[msga] { } } +# ==================================== CPU limits ============================================= +# Fails if pod does not have container with CPU-limits +deny[msga] { + pod 
:= input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v does not have CPU-limit or request", [ container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [pod] + } + } +} + +# Fails if workload does not have container with CPU-limits +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [wl] + } + } +} + +# Fails if cronjob does not have container with CPU-limits +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [wl] + } + } +} diff --git a/rules/resources-cpu-limit-and-request/test/cronjob/expected.json b/rules/resources-cpu-limit-and-request/test/cronjob/expected.json index 28b1ab3ba..e03f943e8 100644 --- a/rules/resources-cpu-limit-and-request/test/cronjob/expected.json +++ b/rules/resources-cpu-limit-and-request/test/cronjob/expected.json @@ -2,8 +2,36 @@ { "alertMessage": "Container: hello in CronJob: hello does not have CPU-limit or request", "failedPaths": [], - "fixPaths" : [{"path": "spec.jobTemplate.spec.template.spec.containers[0].resources.limits.cpu", "value": "YOUR_VALUE"}, - {"path": "spec.jobTemplate.spec.template.spec.containers[0].resources.requests.cpu", "value": "YOUR_VALUE"}], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.limits.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + }, + { + "alertMessage": "Container: hello in CronJob: hello does not have CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json b/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json new file mode 100644 index 000000000..a19179dff --- /dev/null +++ 
b/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json @@ -0,0 +1,22 @@ +[ + { + "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "failedPaths": [], + "fixPaths" : [{"path":"spec.containers[1].resources.limits.cpu", "value": "YOUR_VALUE"}], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] + diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-limits/input/pod.yaml b/rules/resources-cpu-limit-and-request/test/pod-only-limits/input/pod.yaml new file mode 100644 index 000000000..d1207f1bb --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/pod-only-limits/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json b/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json new file mode 100644 index 000000000..8a0bba75c --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json @@ -0,0 +1,21 @@ +[ + { + "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "failedPaths": [], + "fixPaths" : [{"path": "spec.containers[1].resources.requests.cpu", "value": "YOUR_VALUE"}], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-requests/input/pod.yaml b/rules/resources-cpu-limit-and-request/test/pod-only-requests/input/pod.yaml new file mode 100644 index 000000000..0495de5d3 --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/pod-only-requests/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + limits: + memory: "128Mi" + cpu: "500m" diff --git a/rules/resources-cpu-limit-and-request/test/pod/expected.json b/rules/resources-cpu-limit-and-request/test/pod/expected.json index 24a8f72bb..08f0190f9 100644 --- a/rules/resources-cpu-limit-and-request/test/pod/expected.json +++ b/rules/resources-cpu-limit-and-request/test/pod/expected.json @@ -2,8 +2,36 @@ { "alertMessage": "Container: log-aggregator does not have CPU-limit or request", "failedPaths": [], - "fixPaths" : [{"path":"spec.containers[1].resources.limits.cpu", "value": "YOUR_VALUE"}, - {"path": "spec.containers[1].resources.requests.cpu", "value": "YOUR_VALUE"}], + "fixPaths": [ + { + "path": "spec.containers[1].resources.limits.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": 
"v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + }, + { + "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, diff --git a/rules/resources-cpu-limit-and-request/test/workload/expected.json b/rules/resources-cpu-limit-and-request/test/workload/expected.json index aa8d65acf..0d4c0c19e 100644 --- a/rules/resources-cpu-limit-and-request/test/workload/expected.json +++ b/rules/resources-cpu-limit-and-request/test/workload/expected.json @@ -1,26 +1,56 @@ -[{ - "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", - "failedPaths": [], - "fixPaths": [{ - "path": "spec.template.spec.containers[0].resources.limits.cpu", - "value": "YOUR_VALUE" - }, { - "path": "spec.template.spec.containers[0].resources.requests.cpu", - "value": "YOUR_VALUE" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "purpose": "demonstrate-command" - }, - "name": "test" +[ + { + "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.limits.cpu", + "value": "YOUR_VALUE" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + }, + { + "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file From cc7fb10a18a62c1ecebee35bc56c67445fcccd03 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 19:30:02 +0300 Subject: [PATCH 010/195] add missing reviewPaths Signed-off-by: YiscahLevySilas1 --- rules/image-pull-policy-is-not-set-to-always/raw.rego | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rules/image-pull-policy-is-not-set-to-always/raw.rego b/rules/image-pull-policy-is-not-set-to-always/raw.rego index d6a4e1fce..a54988bb8 100644 --- a/rules/image-pull-policy-is-not-set-to-always/raw.rego +++ b/rules/image-pull-policy-is-not-set-to-always/raw.rego @@ -11,6 +11,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'", [container.name, pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 2, + "reviewPaths": paths, "failedPaths": paths, "fixPaths":[], "alertObject": { @@ -30,6 +31,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 2, + "reviewPaths": paths, 
"failedPaths": paths, "fixPaths":[], "alertObject": { @@ -48,6 +50,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'", [container.name, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 2, + "reviewPaths": paths, "failedPaths": paths, "fixPaths":[], "alertObject": { From 4e9925248d11820ca6607fcf9f87c5848e1d6d2c Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 21:57:26 +0300 Subject: [PATCH 011/195] add missing reviewPaths Signed-off-by: YiscahLevySilas1 --- rules/exec-into-container/raw.rego | 2 ++ rules/rule-can-impersonate-users-groups/raw.rego | 2 ++ rules/rule-can-list-get-secrets/raw.rego | 2 ++ rules/rule-can-portforward/raw.rego | 2 ++ rules/rule-can-ssh-to-pod/raw.rego | 2 ++ rules/rule-can-update-configmap/raw.rego | 2 ++ rules/rule-credentials-in-env-var/raw.rego | 5 +++++ 7 files changed, 17 insertions(+) diff --git a/rules/exec-into-container/raw.rego b/rules/exec-into-container/raw.rego index 2ddac11d1..c09d77250 100644 --- a/rules/exec-into-container/raw.rego +++ b/rules/exec-into-container/raw.rego @@ -61,6 +61,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can exec into containers", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -95,6 +96,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can exec into containers", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-can-impersonate-users-groups/raw.rego b/rules/rule-can-impersonate-users-groups/raw.rego index 4d2fcceac..8fe4e0589 100644 --- a/rules/rule-can-impersonate-users-groups/raw.rego +++ b/rules/rule-can-impersonate-users-groups/raw.rego @@ -53,6 +53,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can impersonate users", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -85,6 +86,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can impersonate users", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-can-list-get-secrets/raw.rego b/rules/rule-can-list-get-secrets/raw.rego index d9a8d65a6..f94f31ab1 100644 --- a/rules/rule-can-list-get-secrets/raw.rego +++ b/rules/rule-can-list-get-secrets/raw.rego @@ -59,6 +59,7 @@ deny[msga] { "alertMessage": sprintf("The following %v: %v can read secrets", [subject.kind, subject.name]), "alertScore": 9, "packagename": "armo_builtins", + "deletePaths": [path], "failedPaths": [path], "alertObject": { "k8sApiObjects": [role,rolebinding], @@ -91,6 +92,7 @@ deny[msga] { "alertMessage": sprintf("The following %v: %v can read secrets", [subject.kind, subject.name]), "alertScore": 9, "packagename": "armo_builtins", + "deletePaths": [path], "failedPaths": [path], "alertObject": { "k8sApiObjects": [role,clusterrolebinding], diff --git a/rules/rule-can-portforward/raw.rego b/rules/rule-can-portforward/raw.rego index 69ccb7a1c..b5fa53bf3 100644 --- a/rules/rule-can-portforward/raw.rego +++ b/rules/rule-can-portforward/raw.rego @@ -53,6 +53,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can do port forwarding", 
[subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -85,6 +86,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can do port forwarding", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-can-ssh-to-pod/raw.rego b/rules/rule-can-ssh-to-pod/raw.rego index 7421aaf05..34699995d 100644 --- a/rules/rule-can-ssh-to-pod/raw.rego +++ b/rules/rule-can-ssh-to-pod/raw.rego @@ -49,6 +49,7 @@ deny[msga] { "alertMessage": sprintf("%v: %v is exposed by SSH services: %v", [wl.kind, wl.metadata.name, service]), "packagename": "armo_builtins", "alertScore": 7, + "deletePaths": [path], "failedPaths": [path], "alertObject": { "k8sApiObjects": [wl,service] @@ -72,6 +73,7 @@ deny[msga] { "alertMessage": sprintf("%v: %v is exposed by SSH services: %v", [wl.kind, wl.metadata.name, service]), "packagename": "armo_builtins", "alertScore": 7, + "deletePaths": [path], "failedPaths": [path], "alertObject": { "k8sApiObjects": [wl,service] diff --git a/rules/rule-can-update-configmap/raw.rego b/rules/rule-can-update-configmap/raw.rego index 4cb945719..305e000d0 100644 --- a/rules/rule-can-update-configmap/raw.rego +++ b/rules/rule-can-update-configmap/raw.rego @@ -71,6 +71,7 @@ deny[msga] { msga := { "alertMessage": sprintf("The following %v: %v can modify 'coredns' configmap", [subject.kind, subject.name]), "alertScore": 6, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -112,6 +113,7 @@ deny[msga] { msga := { "alertMessage": sprintf("The following %v: %v can modify 'coredns' configmap", [subject.kind, subject.name]), "alertScore": 6, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-credentials-in-env-var/raw.rego b/rules/rule-credentials-in-env-var/raw.rego index 328efc25a..e81f1aefc 100644 --- a/rules/rule-credentials-in-env-var/raw.rego +++ b/rules/rule-credentials-in-env-var/raw.rego @@ -55,6 +55,7 @@ "alertMessage": sprintf("%v: %v has sensitive information in environment variables", [wl.kind, wl.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -86,6 +87,7 @@ "alertMessage": sprintf("Cronjob: %v has sensitive information in environment variables", [wl.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -116,6 +118,7 @@ deny[msga] { "alertMessage": sprintf("Pod: %v has sensitive information in environment variables", [pod.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -147,6 +150,7 @@ deny[msga] { "alertMessage": sprintf("%v: %v has sensitive information in environment variables", [wl.kind, wl.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -176,6 +180,7 @@ deny[msga] { "alertMessage": sprintf("Cronjob: %v has sensitive information in environment variables", [wl.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { From e05ceb43b0c2d23d8987137f53b9638fe8ebb973 Mon Sep 17 
00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 21:58:12 +0300 Subject: [PATCH 012/195] change failedpath tp fixpath, which is preferable Signed-off-by: YiscahLevySilas1 --- rules/immutable-container-filesystem/raw.rego | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/immutable-container-filesystem/raw.rego b/rules/immutable-container-filesystem/raw.rego index 6f2d9aaff..2ea41df27 100644 --- a/rules/immutable-container-filesystem/raw.rego +++ b/rules/immutable-container-filesystem/raw.rego @@ -70,8 +70,8 @@ deny[msga] { # Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec is_mutable_filesystem(container, start_of_path, i) = [failed_path, fixPath] { container.securityContext.readOnlyRootFilesystem == false - failed_path = sprintf("%vcontainers[%v].securityContext.readOnlyRootFilesystem", [start_of_path, format_int(i, 10)]) - fixPath = "" + fixPath = {"path": sprintf("%vcontainers[%v].securityContext.readOnlyRootFilesystem", [start_of_path, format_int(i, 10)]), "value": "true"} + failed_path = "" } is_mutable_filesystem(container, start_of_path, i) = [failed_path, fixPath] { From cdff38c4fa794a27bc1fc362c8eb33c5c89076b9 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 17 Oct 2023 09:44:11 +0300 Subject: [PATCH 013/195] fix test Signed-off-by: YiscahLevySilas1 --- .../test/workloads/expected.json | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/rules/immutable-container-filesystem/test/workloads/expected.json b/rules/immutable-container-filesystem/test/workloads/expected.json index d54293443..fb11fba4b 100644 --- a/rules/immutable-container-filesystem/test/workloads/expected.json +++ b/rules/immutable-container-filesystem/test/workloads/expected.json @@ -1,7 +1,10 @@ [{ "alertMessage": "container :mysql in Deployment: my-deployment has mutable filesystem", - "failedPaths": ["spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem"], - "fixPaths": [], + "failedPaths": [], + "fixPaths": [{ + "path": "spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem", + "value": "true" + }], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, From 13bffc0ae181d4fa4cc68284dfe6e6c7038e348c Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 19:30:02 +0300 Subject: [PATCH 014/195] add missing reviewPaths Signed-off-by: YiscahLevySilas1 --- rules/image-pull-policy-is-not-set-to-always/raw.rego | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rules/image-pull-policy-is-not-set-to-always/raw.rego b/rules/image-pull-policy-is-not-set-to-always/raw.rego index d6a4e1fce..a54988bb8 100644 --- a/rules/image-pull-policy-is-not-set-to-always/raw.rego +++ b/rules/image-pull-policy-is-not-set-to-always/raw.rego @@ -11,6 +11,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'", [container.name, pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 2, + "reviewPaths": paths, "failedPaths": paths, "fixPaths":[], "alertObject": { @@ -30,6 +31,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 2, + "reviewPaths": paths, "failedPaths": paths, "fixPaths":[], "alertObject": { @@ -48,6 +50,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in cronjob: %v has 
'latest' tag on image but imagePullPolicy is not set to 'Always'", [container.name, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 2, + "reviewPaths": paths, "failedPaths": paths, "fixPaths":[], "alertObject": { From 273ca4f0747208f82d1d3e490a1b005f1e740c23 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 21:57:26 +0300 Subject: [PATCH 015/195] add missing reviewPaths Signed-off-by: YiscahLevySilas1 --- rules/exec-into-container/raw.rego | 2 ++ rules/rule-can-impersonate-users-groups/raw.rego | 2 ++ rules/rule-can-list-get-secrets/raw.rego | 2 ++ rules/rule-can-portforward/raw.rego | 2 ++ rules/rule-can-ssh-to-pod/raw.rego | 2 ++ rules/rule-can-update-configmap/raw.rego | 2 ++ rules/rule-credentials-in-env-var/raw.rego | 5 +++++ 7 files changed, 17 insertions(+) diff --git a/rules/exec-into-container/raw.rego b/rules/exec-into-container/raw.rego index 2ddac11d1..c09d77250 100644 --- a/rules/exec-into-container/raw.rego +++ b/rules/exec-into-container/raw.rego @@ -61,6 +61,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can exec into containers", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -95,6 +96,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can exec into containers", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-can-impersonate-users-groups/raw.rego b/rules/rule-can-impersonate-users-groups/raw.rego index 4d2fcceac..8fe4e0589 100644 --- a/rules/rule-can-impersonate-users-groups/raw.rego +++ b/rules/rule-can-impersonate-users-groups/raw.rego @@ -53,6 +53,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can impersonate users", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -85,6 +86,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can impersonate users", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-can-list-get-secrets/raw.rego b/rules/rule-can-list-get-secrets/raw.rego index d9a8d65a6..f94f31ab1 100644 --- a/rules/rule-can-list-get-secrets/raw.rego +++ b/rules/rule-can-list-get-secrets/raw.rego @@ -59,6 +59,7 @@ deny[msga] { "alertMessage": sprintf("The following %v: %v can read secrets", [subject.kind, subject.name]), "alertScore": 9, "packagename": "armo_builtins", + "deletePaths": [path], "failedPaths": [path], "alertObject": { "k8sApiObjects": [role,rolebinding], @@ -91,6 +92,7 @@ deny[msga] { "alertMessage": sprintf("The following %v: %v can read secrets", [subject.kind, subject.name]), "alertScore": 9, "packagename": "armo_builtins", + "deletePaths": [path], "failedPaths": [path], "alertObject": { "k8sApiObjects": [role,clusterrolebinding], diff --git a/rules/rule-can-portforward/raw.rego b/rules/rule-can-portforward/raw.rego index 69ccb7a1c..b5fa53bf3 100644 --- a/rules/rule-can-portforward/raw.rego +++ b/rules/rule-can-portforward/raw.rego @@ -53,6 +53,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can do port forwarding", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": 
{ @@ -85,6 +86,7 @@ deny[msga] { msga := { "alertMessage": sprintf("the following %v: %v, can do port forwarding", [subject.kind, subject.name]), "alertScore": 9, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-can-ssh-to-pod/raw.rego b/rules/rule-can-ssh-to-pod/raw.rego index 7421aaf05..34699995d 100644 --- a/rules/rule-can-ssh-to-pod/raw.rego +++ b/rules/rule-can-ssh-to-pod/raw.rego @@ -49,6 +49,7 @@ deny[msga] { "alertMessage": sprintf("%v: %v is exposed by SSH services: %v", [wl.kind, wl.metadata.name, service]), "packagename": "armo_builtins", "alertScore": 7, + "deletePaths": [path], "failedPaths": [path], "alertObject": { "k8sApiObjects": [wl,service] @@ -72,6 +73,7 @@ deny[msga] { "alertMessage": sprintf("%v: %v is exposed by SSH services: %v", [wl.kind, wl.metadata.name, service]), "packagename": "armo_builtins", "alertScore": 7, + "deletePaths": [path], "failedPaths": [path], "alertObject": { "k8sApiObjects": [wl,service] diff --git a/rules/rule-can-update-configmap/raw.rego b/rules/rule-can-update-configmap/raw.rego index 4cb945719..305e000d0 100644 --- a/rules/rule-can-update-configmap/raw.rego +++ b/rules/rule-can-update-configmap/raw.rego @@ -71,6 +71,7 @@ deny[msga] { msga := { "alertMessage": sprintf("The following %v: %v can modify 'coredns' configmap", [subject.kind, subject.name]), "alertScore": 6, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -112,6 +113,7 @@ deny[msga] { msga := { "alertMessage": sprintf("The following %v: %v can modify 'coredns' configmap", [subject.kind, subject.name]), "alertScore": 6, + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-credentials-in-env-var/raw.rego b/rules/rule-credentials-in-env-var/raw.rego index 328efc25a..e81f1aefc 100644 --- a/rules/rule-credentials-in-env-var/raw.rego +++ b/rules/rule-credentials-in-env-var/raw.rego @@ -55,6 +55,7 @@ "alertMessage": sprintf("%v: %v has sensitive information in environment variables", [wl.kind, wl.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -86,6 +87,7 @@ "alertMessage": sprintf("Cronjob: %v has sensitive information in environment variables", [wl.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -116,6 +118,7 @@ deny[msga] { "alertMessage": sprintf("Pod: %v has sensitive information in environment variables", [pod.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -147,6 +150,7 @@ deny[msga] { "alertMessage": sprintf("%v: %v has sensitive information in environment variables", [wl.kind, wl.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { @@ -176,6 +180,7 @@ deny[msga] { "alertMessage": sprintf("Cronjob: %v has sensitive information in environment variables", [wl.metadata.name]), "alertScore": 9, "fixPaths": [], + "deletePaths": [path], "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { From d731ff7e1be1936b61a1f19f87504602a2df44ef Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 21:58:12 +0300 Subject: [PATCH 016/195] change failedpath tp fixpath, which is 
preferable Signed-off-by: YiscahLevySilas1 --- rules/immutable-container-filesystem/raw.rego | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/immutable-container-filesystem/raw.rego b/rules/immutable-container-filesystem/raw.rego index 6f2d9aaff..2ea41df27 100644 --- a/rules/immutable-container-filesystem/raw.rego +++ b/rules/immutable-container-filesystem/raw.rego @@ -70,8 +70,8 @@ deny[msga] { # Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec is_mutable_filesystem(container, start_of_path, i) = [failed_path, fixPath] { container.securityContext.readOnlyRootFilesystem == false - failed_path = sprintf("%vcontainers[%v].securityContext.readOnlyRootFilesystem", [start_of_path, format_int(i, 10)]) - fixPath = "" + fixPath = {"path": sprintf("%vcontainers[%v].securityContext.readOnlyRootFilesystem", [start_of_path, format_int(i, 10)]), "value": "true"} + failed_path = "" } is_mutable_filesystem(container, start_of_path, i) = [failed_path, fixPath] { From 0a9f71cd5659c2f05508e7b7b67142e9c0ce1f75 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 17 Oct 2023 09:44:11 +0300 Subject: [PATCH 017/195] fix test Signed-off-by: YiscahLevySilas1 --- .../test/workloads/expected.json | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/rules/immutable-container-filesystem/test/workloads/expected.json b/rules/immutable-container-filesystem/test/workloads/expected.json index d54293443..fb11fba4b 100644 --- a/rules/immutable-container-filesystem/test/workloads/expected.json +++ b/rules/immutable-container-filesystem/test/workloads/expected.json @@ -1,7 +1,10 @@ [{ "alertMessage": "container :mysql in Deployment: my-deployment has mutable filesystem", - "failedPaths": ["spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem"], - "fixPaths": [], + "failedPaths": [], + "fixPaths": [{ + "path": "spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem", + "value": "true" + }], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, From 327896448ffea629fc358b2351b24aab93f00707 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 18:33:07 +0300 Subject: [PATCH 018/195] fix control C-0004 Signed-off-by: YiscahLevySilas1 --- .../raw.rego | 117 ++++++++++++------ .../test/cronjob/expected.json | 22 +++- .../test/pod-only-limits/expected.json | 20 +++ .../test/pod-only-limits/input/pod.yaml | 23 ++++ .../test/pod-only-requests/expected.json | 20 +++ .../test/pod-only-requests/input/pod.yaml | 23 ++++ .../test/pod/expected.json | 69 +++++++---- .../test/workload/expected.json | 25 +++- 8 files changed, 259 insertions(+), 60 deletions(-) create mode 100644 rules/resources-memory-limit-and-request/test/pod-only-limits/expected.json create mode 100644 rules/resources-memory-limit-and-request/test/pod-only-limits/input/pod.yaml create mode 100644 rules/resources-memory-limit-and-request/test/pod-only-requests/expected.json create mode 100644 rules/resources-memory-limit-and-request/test/pod-only-requests/input/pod.yaml diff --git a/rules/resources-memory-limit-and-request/raw.rego b/rules/resources-memory-limit-and-request/raw.rego index cf1c9f289..a82209c01 100644 --- a/rules/resources-memory-limit-and-request/raw.rego +++ b/rules/resources-memory-limit-and-request/raw.rego @@ -1,15 +1,13 @@ package armo_builtins -# Fails if pod does not have container with memory-limit or request +# ================================== memory limits 
================================== +# Fails if pod does not have container with memory-limits deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] - not request_or_limit_memory(container) - fixPaths := [ - {"path": sprintf("spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - ] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v does not have memory-limit or request", [container.name]), @@ -21,17 +19,14 @@ deny[msga] { } } -# Fails if workload does not have container with memory-limit or request +# Fails if workload does not have container with memory-limits deny[msga] { wl := input[_] spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] - not request_or_limit_memory(container) - fixPaths := [ - {"path": sprintf("spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - ] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), @@ -43,16 +38,69 @@ deny[msga] { } } -# Fails if cronjob does not have container with memory-limit or request +# Fails if cronjob does not have container with memory-limits deny[msga] { wl := input[_] wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] - not request_or_limit_memory(container) - fixPaths := [ - {"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - ] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +# ================================== memory requests ================================== +# Fails if pod does not have container with memory requests +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v does not have memory-limit or request", [container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": 
[pod]}, + } +} + +# Fails if workload does not have container with memory requests +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +# Fails if cronjob does not have container with memory requests +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), @@ -148,9 +196,7 @@ is_min_max_exceeded_memory(container) = "resources.limits.memory" { } else = "resources.requests.memory" { memory_req := container.resources.requests.memory is_req_exceeded_memory(memory_req) -} else = "" { - true -} +} else = "" is_limit_exceeded_memory(memory_limit) { is_min_limit_exceeded_memory(memory_limit) @@ -171,7 +217,7 @@ is_req_exceeded_memory(memory_req) { # helpers is_max_limit_exceeded_memory(memory_limit) { - memory_limit_max :=data.postureControlInputs.memory_limit_max[_] + memory_limit_max := data.postureControlInputs.memory_limit_max[_] compare_max(memory_limit_max, memory_limit) } @@ -197,24 +243,24 @@ is_min_request_exceeded_memory(memory_req) { compare_max(max, given) { endswith(max, "Mi") endswith(given, "Mi") - split_max := split(max, "Mi")[0] - split_given := split(given, "Mi")[0] + split_max := split(max, "Mi")[0] + split_given := split(given, "Mi")[0] split_given > split_max } compare_max(max, given) { endswith(max, "M") endswith(given, "M") - split_max := split(max, "M")[0] - split_given := split(given, "M")[0] + split_max := split(max, "M")[0] + split_given := split(given, "M")[0] split_given > split_max } compare_max(max, given) { endswith(max, "m") endswith(given, "m") - split_max := split(max, "m")[0] - split_given := split(given, "m")[0] + split_max := split(max, "m")[0] + split_given := split(given, "m")[0] split_given > split_max } @@ -224,31 +270,29 @@ compare_max(max, given) { given > max } - - ################ # Compare according to unit - min compare_min(min, given) { endswith(min, "Mi") endswith(given, "Mi") - split_min := split(min, "Mi")[0] - split_given := split(given, "Mi")[0] + split_min := split(min, "Mi")[0] + split_given := split(given, "Mi")[0] split_given < split_min } compare_min(min, given) { endswith(min, "M") endswith(given, "M") - split_min := split(min, "M")[0] - split_given := split(given, "M")[0] + split_min := split(min, "M")[0] + split_given := split(given, "M")[0] split_given < split_min } compare_min(min, given) { endswith(min, "m") endswith(given, "m") - split_min := split(min, "m")[0] - split_given := split(given, "m")[0] + split_min := split(min, "m")[0] + split_given := split(given, "m")[0] split_given < split_min } 
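# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the patch above): with the memory limit and
# request checks split into separate deny rules, a container that sets neither
# field now produces two alerts, each carrying its own fixPath — which is what
# the updated expected.json files assert. The package name, rule shapes and the
# sample pod below are hypothetical and only mirror the structure of the rules
# being changed here.
package example_memory_fixpaths

deny[msga] {
	pod := input[_]
	pod.kind == "Pod"
	container := pod.spec.containers[i]
	not container.resources.limits.memory
	msga := {
		"alertMessage": sprintf("Container: %v has no memory limit", [container.name]),
		"failedPaths": [],
		"fixPaths": [{"path": sprintf("spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}],
	}
}

deny[msga] {
	pod := input[_]
	pod.kind == "Pod"
	container := pod.spec.containers[i]
	not container.resources.requests.memory
	msga := {
		"alertMessage": sprintf("Container: %v has no memory request", [container.name]),
		"failedPaths": [],
		"fixPaths": [{"path": sprintf("spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}],
	}
}

# A pod whose only container defines no resources at all triggers both rules:
test_missing_both_yields_two_alerts {
	pod := {"kind": "Pod", "metadata": {"name": "demo"}, "spec": {"containers": [{"name": "app", "image": "nginx"}]}}
	results := deny with input as [pod]
	count(results) == 2
}
# ----------------------------------------------------------------------------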
@@ -258,7 +302,6 @@ compare_min(min, given) { given < min } - # Check that is same unit is_special_measure(unit) { endswith(unit, "m") diff --git a/rules/resources-memory-limit-and-request/test/cronjob/expected.json b/rules/resources-memory-limit-and-request/test/cronjob/expected.json index 7ca8b207e..3444a7588 100644 --- a/rules/resources-memory-limit-and-request/test/cronjob/expected.json +++ b/rules/resources-memory-limit-and-request/test/cronjob/expected.json @@ -6,7 +6,27 @@ { "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.limits.memory", "value": "YOUR_VALUE" - }, + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + }, + { + "alertMessage": "Container: hello in CronJob: hello does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ { "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.requests.memory", "value": "YOUR_VALUE" diff --git a/rules/resources-memory-limit-and-request/test/pod-only-limits/expected.json b/rules/resources-memory-limit-and-request/test/pod-only-limits/expected.json new file mode 100644 index 000000000..6b6a0addf --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod-only-limits/expected.json @@ -0,0 +1,20 @@ +[{ + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [{ + "path": "spec.containers[1].resources.limits.memory", + "value": "YOUR_VALUE" + }], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + }] + } +}] \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/pod-only-limits/input/pod.yaml b/rules/resources-memory-limit-and-request/test/pod-only-limits/input/pod.yaml new file mode 100644 index 000000000..7774dea5f --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod-only-limits/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + cpu: "500m" diff --git a/rules/resources-memory-limit-and-request/test/pod-only-requests/expected.json b/rules/resources-memory-limit-and-request/test/pod-only-requests/expected.json new file mode 100644 index 000000000..4648d72fc --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod-only-requests/expected.json @@ -0,0 +1,20 @@ +[{ + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [{ + "path": "spec.containers[1].resources.requests.memory", + "value": "YOUR_VALUE" + }], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + }] + } +}] \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/pod-only-requests/input/pod.yaml b/rules/resources-memory-limit-and-request/test/pod-only-requests/input/pod.yaml new file mode 100644 
index 000000000..d146d134a --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod-only-requests/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" diff --git a/rules/resources-memory-limit-and-request/test/pod/expected.json b/rules/resources-memory-limit-and-request/test/pod/expected.json index 67802dc0e..7521c44a2 100644 --- a/rules/resources-memory-limit-and-request/test/pod/expected.json +++ b/rules/resources-memory-limit-and-request/test/pod/expected.json @@ -1,23 +1,50 @@ -[{ - "alertMessage": "Container: log-aggregator does not have memory-limit or request", - "failedPaths": [], - "fixPaths": [{ - "path": "spec.containers[1].resources.limits.memory", - "value": "YOUR_VALUE" - }, { - "path": "spec.containers[1].resources.requests.memory", - "value": "YOUR_VALUE" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "frontend" +[ + { + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.limits.memory", + "value": "YOUR_VALUE" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + }, + { + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.requests.memory", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/workload/expected.json b/rules/resources-memory-limit-and-request/test/workload/expected.json index 123dc187f..35d582692 100644 --- a/rules/resources-memory-limit-and-request/test/workload/expected.json +++ b/rules/resources-memory-limit-and-request/test/workload/expected.json @@ -6,7 +6,30 @@ { "path": "spec.template.spec.containers[0].resources.limits.memory", "value": "YOUR_VALUE" - }, + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + }, + { + "alertMessage": "Container: app in Deployment: test does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ { "path": "spec.template.spec.containers[0].resources.requests.memory", "value": "YOUR_VALUE" From 3c8ea0a0fe16566f560ced1e0d925363ca1d461b Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 16 Oct 2023 18:59:29 +0300 Subject: [PATCH 019/195] fix C-0050 Signed-off-by: YiscahLevySilas1 --- .../resources-cpu-limit-and-request/raw.rego | 86 ++++++++++++++++--- 
.../test/cronjob/expected.json | 32 ++++++- .../test/pod-only-limits/expected.json | 22 +++++ .../test/pod-only-limits/input/pod.yaml | 23 +++++ .../test/pod-only-requests/expected.json | 21 +++++ .../test/pod-only-requests/input/pod.yaml | 23 +++++ .../test/pod/expected.json | 32 ++++++- .../test/workload/expected.json | 78 +++++++++++------ 8 files changed, 277 insertions(+), 40 deletions(-) create mode 100644 rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json create mode 100644 rules/resources-cpu-limit-and-request/test/pod-only-limits/input/pod.yaml create mode 100644 rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json create mode 100644 rules/resources-cpu-limit-and-request/test/pod-only-requests/input/pod.yaml diff --git a/rules/resources-cpu-limit-and-request/raw.rego b/rules/resources-cpu-limit-and-request/raw.rego index 317be212e..07f136ec4 100644 --- a/rules/resources-cpu-limit-and-request/raw.rego +++ b/rules/resources-cpu-limit-and-request/raw.rego @@ -1,14 +1,14 @@ package armo_builtins -# Fails if pod does not have container with CPU-limit or request +# ==================================== CPU requests ============================================= +# Fails if pod does not have container with CPU request deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] - not request_or_limit_cpu(container) + not container.resources.requests.cpu - fixPaths := [{"path": sprintf("spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + fixPaths := [{"path": sprintf("spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v does not have CPU-limit or request", [ container.name]), @@ -22,16 +22,15 @@ deny[msga] { } } -# Fails if workload does not have container with CPU-limit or request +# Fails if workload does not have container with CPU requests deny[msga] { wl := input[_] spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] - not request_or_limit_cpu(container) + not container.resources.requests.cpu - fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), @@ -45,15 +44,14 @@ deny[msga] { } } -# Fails if cronjob does not have container with CPU-limit or request +# Fails if cronjob does not have container with CPU requests deny[msga] { wl := input[_] wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] - not request_or_limit_cpu(container) + not container.resources.requests.cpu - fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}, - {"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] 
+ fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), @@ -67,6 +65,70 @@ deny[msga] { } } +# ==================================== CPU limits ============================================= +# Fails if pod does not have container with CPU-limits +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v does not have CPU-limit or request", [ container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [pod] + } + } +} + +# Fails if workload does not have container with CPU-limits +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [wl] + } + } +} + +# Fails if cronjob does not have container with CPU-limits +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [wl] + } + } +} diff --git a/rules/resources-cpu-limit-and-request/test/cronjob/expected.json b/rules/resources-cpu-limit-and-request/test/cronjob/expected.json index 28b1ab3ba..e03f943e8 100644 --- a/rules/resources-cpu-limit-and-request/test/cronjob/expected.json +++ b/rules/resources-cpu-limit-and-request/test/cronjob/expected.json @@ -2,8 +2,36 @@ { "alertMessage": "Container: hello in CronJob: hello does not have CPU-limit or request", "failedPaths": [], - "fixPaths" : [{"path": "spec.jobTemplate.spec.template.spec.containers[0].resources.limits.cpu", "value": "YOUR_VALUE"}, - {"path": "spec.jobTemplate.spec.template.spec.containers[0].resources.requests.cpu", "value": "YOUR_VALUE"}], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.limits.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + }, + { + "alertMessage": "Container: hello in CronJob: hello does not have 
CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json b/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json new file mode 100644 index 000000000..a19179dff --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json @@ -0,0 +1,22 @@ +[ + { + "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "failedPaths": [], + "fixPaths" : [{"path":"spec.containers[1].resources.limits.cpu", "value": "YOUR_VALUE"}], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] + diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-limits/input/pod.yaml b/rules/resources-cpu-limit-and-request/test/pod-only-limits/input/pod.yaml new file mode 100644 index 000000000..d1207f1bb --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/pod-only-limits/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json b/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json new file mode 100644 index 000000000..8a0bba75c --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json @@ -0,0 +1,21 @@ +[ + { + "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "failedPaths": [], + "fixPaths" : [{"path": "spec.containers[1].resources.requests.cpu", "value": "YOUR_VALUE"}], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-requests/input/pod.yaml b/rules/resources-cpu-limit-and-request/test/pod-only-requests/input/pod.yaml new file mode 100644 index 000000000..0495de5d3 --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/pod-only-requests/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + limits: + memory: "128Mi" + cpu: "500m" diff --git a/rules/resources-cpu-limit-and-request/test/pod/expected.json b/rules/resources-cpu-limit-and-request/test/pod/expected.json index 24a8f72bb..08f0190f9 100644 --- a/rules/resources-cpu-limit-and-request/test/pod/expected.json +++ b/rules/resources-cpu-limit-and-request/test/pod/expected.json @@ -2,8 +2,36 @@ { "alertMessage": "Container: 
log-aggregator does not have CPU-limit or request", "failedPaths": [], - "fixPaths" : [{"path":"spec.containers[1].resources.limits.cpu", "value": "YOUR_VALUE"}, - {"path": "spec.containers[1].resources.requests.cpu", "value": "YOUR_VALUE"}], + "fixPaths": [ + { + "path": "spec.containers[1].resources.limits.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + }, + { + "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, diff --git a/rules/resources-cpu-limit-and-request/test/workload/expected.json b/rules/resources-cpu-limit-and-request/test/workload/expected.json index aa8d65acf..0d4c0c19e 100644 --- a/rules/resources-cpu-limit-and-request/test/workload/expected.json +++ b/rules/resources-cpu-limit-and-request/test/workload/expected.json @@ -1,26 +1,56 @@ -[{ - "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", - "failedPaths": [], - "fixPaths": [{ - "path": "spec.template.spec.containers[0].resources.limits.cpu", - "value": "YOUR_VALUE" - }, { - "path": "spec.template.spec.containers[0].resources.requests.cpu", - "value": "YOUR_VALUE" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "purpose": "demonstrate-command" - }, - "name": "test" +[ + { + "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.limits.cpu", + "value": "YOUR_VALUE" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + }, + { + "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file From f495c4f023d68bf4cf719ef6815d48fd95190cd1 Mon Sep 17 00:00:00 2001 From: rcohencyberarmor Date: Tue, 17 Oct 2023 12:28:36 +0300 Subject: [PATCH 020/195] add file scope for specific controls Signed-off-by: rcohencyberarmor --- controls/C-0186-minimizeaccesstosecrets.json | 3 ++- controls/C-0187-minimizewildcarduseinrolesandclusterroles.json | 3 ++- controls/C-0188-minimizeaccesstocreatepods.json | 3 ++- ...189-ensurethatdefaultserviceaccountsarenotactivelyused.json | 3 ++- ...rethatserviceaccounttokensareonlymountedwherenecessary.json | 3 ++- ...mpersonateandescalatepermissionsinthekubernetescluster.json | 3 ++- ...lusterhasatleastoneactivepolicycontrolmechanisminplace.json | 3 
++- .../C-0193-minimizetheadmissionofprivilegedcontainers.json | 3 ++- ...ionofcontainerswishingtosharethehostprocessidnamespace.json | 3 ++- ...admissionofcontainerswishingtosharethehostipcnamespace.json | 3 ++- ...ssionofcontainerswishingtosharethehostnetworknamespace.json | 3 ++- ...zetheadmissionofcontainerswithallowprivilegeescalation.json | 3 ++- controls/C-0198-minimizetheadmissionofrootcontainers.json | 3 ++- ...nimizetheadmissionofcontainerswiththenet_rawcapability.json | 3 ++- ...-minimizetheadmissionofcontainerswithaddedcapabilities.json | 3 ++- ...nimizetheadmissionofcontainerswithcapabilitiesassigned.json | 3 ++- ...202-minimizetheadmissionofwindowshostprocesscontainers.json | 3 ++- controls/C-0203-minimizetheadmissionofhostpathvolumes.json | 3 ++- ...0204-minimizetheadmissionofcontainerswhichusehostports.json | 3 ++- ...0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json | 3 ++- ...erusingsecretsasfilesoversecretsasenvironmentvariables.json | 3 ++- ...seccompprofileissettodockerdefaultinyourpoddefinitions.json | 3 ++- .../C-0211-applysecuritycontexttoyourpodsandcontainers.json | 3 ++- 23 files changed, 46 insertions(+), 23 deletions(-) diff --git a/controls/C-0186-minimizeaccesstosecrets.json b/controls/C-0186-minimizeaccesstosecrets.json index 27caec6c8..d8f0fc3bb 100644 --- a/controls/C-0186-minimizeaccesstosecrets.json +++ b/controls/C-0186-minimizeaccesstosecrets.json @@ -23,7 +23,8 @@ "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json b/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json index df7589ece..1a09b3e9c 100644 --- a/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json +++ b/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0188-minimizeaccesstocreatepods.json b/controls/C-0188-minimizeaccesstocreatepods.json index e88f6bfff..78da8ae95 100644 --- a/controls/C-0188-minimizeaccesstocreatepods.json +++ b/controls/C-0188-minimizeaccesstocreatepods.json @@ -23,7 +23,8 @@ "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder 
ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json b/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json index 94bfb33f7..1e1a560b3 100644 --- a/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json +++ b/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json @@ -24,7 +24,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json b/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json index 14609b37b..8f69e21cb 100644 --- a/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json +++ b/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json b/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json index 9583d658c..799121bf5 100644 --- a/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json +++ b/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json b/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json index 9e455bd40..fedc5df46 100644 --- a/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json +++ b/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json @@ -24,7 +24,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json b/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json index 20b34595f..5ddbdc34b 100644 --- a/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json +++ b/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json b/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json index 622a4f79b..a3cba616a 100644 --- a/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json +++ b/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git 
a/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json b/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json index ca8366d23..26f63ca44 100644 --- a/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json +++ b/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json b/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json index 9fd2f5389..2e4d69dd0 100644 --- a/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json +++ b/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json b/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json index 159116541..811f1726d 100644 --- a/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json +++ b/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0198-minimizetheadmissionofrootcontainers.json b/controls/C-0198-minimizetheadmissionofrootcontainers.json index 069ff1e80..c837df9cf 100644 --- a/controls/C-0198-minimizetheadmissionofrootcontainers.json +++ b/controls/C-0198-minimizetheadmissionofrootcontainers.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json b/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json index 30f8d3640..76f59df0d 100644 --- a/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json +++ b/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json b/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json index 851624db0..c106ea32c 100644 --- a/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json +++ b/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json b/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json index d90d4800a..18b9a72b3 100644 --- a/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json +++ b/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json 
b/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json index 7f77bd3de..eae222b48 100644 --- a/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json +++ b/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0203-minimizetheadmissionofhostpathvolumes.json b/controls/C-0203-minimizetheadmissionofhostpathvolumes.json index c6e32149a..6cd23d1f5 100644 --- a/controls/C-0203-minimizetheadmissionofhostpathvolumes.json +++ b/controls/C-0203-minimizetheadmissionofhostpathvolumes.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json b/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json index 3fd35c822..9f3c5c839 100644 --- a/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json +++ b/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json b/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json index 4a5098c85..fb452754e 100644 --- a/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json +++ b/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json b/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json index 133eeb89e..bf3a7c610 100644 --- a/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json +++ b/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json @@ -26,7 +26,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json b/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json index 61ef46bfc..eb94bb243 100644 --- a/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json +++ b/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json @@ -23,7 +23,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file diff --git a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json index e1ec5c70d..d51668858 100644 --- a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json +++ b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json @@ -45,7 +45,8 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster", + "file" ] } } \ No newline at end of file From 752466e8d45674a298dc5666cd949f8a551e48cc Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 17 Oct 2023 16:55:43 +0300 Subject: [PATCH 021/195] separate limit and request cases so both paths can be sent Signed-off-by: YiscahLevySilas1 --- .../resources-cpu-limit-and-request/raw.rego | 99 ++++++++++++++-- .../raw.rego | 112 
++++++++++++++---- 2 files changed, 181 insertions(+), 30 deletions(-) diff --git a/rules/resources-cpu-limit-and-request/raw.rego b/rules/resources-cpu-limit-and-request/raw.rego index 07f136ec4..3cfbac051 100644 --- a/rules/resources-cpu-limit-and-request/raw.rego +++ b/rules/resources-cpu-limit-and-request/raw.rego @@ -1,6 +1,6 @@ package armo_builtins -# ==================================== CPU requests ============================================= +# ==================================== no CPU requests ============================================= # Fails if pod does not have container with CPU request deny[msga] { pod := input[_] @@ -65,7 +65,7 @@ deny[msga] { } } -# ==================================== CPU limits ============================================= +# ==================================== no CPU limits ============================================= # Fails if pod does not have container with CPU-limits deny[msga] { pod := input[_] @@ -132,7 +132,7 @@ deny[msga] { -################################################################################################################### +# ============================================= cpu limits exceed min/max ============================================= # Fails if pod exceeds CPU-limit or request deny[msga] { @@ -140,8 +140,9 @@ deny[msga] { pod.kind == "Pod" container := pod.spec.containers[i] request_or_limit_cpu(container) - resource := is_min_max_exceeded_cpu(container) - resource != "" + resource := "resources.limits.cpu" + cpu_limit := container.resources.limits.cpu + is_limit_exceeded_cpu(cpu_limit) failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), resource]) @@ -166,8 +167,9 @@ deny[msga] { container := wl.spec.template.spec.containers[i] request_or_limit_cpu(container) - resource := is_min_max_exceeded_cpu(container) - resource != "" + resource := "resources.limits.cpu" + cpu_limit := container.resources.limits.cpu + is_limit_exceeded_cpu(cpu_limit) failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) @@ -191,8 +193,9 @@ deny[msga] { container = wl.spec.jobTemplate.spec.template.spec.containers[i] request_or_limit_cpu(container) - resource := is_min_max_exceeded_cpu(container) - resource != "" + resource := "resources.limits.cpu" + cpu_limit := container.resources.limits.cpu + is_limit_exceeded_cpu(cpu_limit) failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) @@ -209,7 +212,85 @@ deny[msga] { } } +# ============================================= cpu requests exceed min/max ============================================= + +# Fails if pod exceeds CPU-limit or request +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + request_or_limit_cpu(container) + resource := "resources.requests.cpu" + cpu_req := container.resources.requests.cpu + is_req_exceeded_cpu(cpu_req) + + failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), resource]) + + msga := { + "alertMessage": sprintf("Container: %v exceeds CPU-limit or request", [ container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [failed_paths], + "failedPaths": [failed_paths], + "fixPaths": [], + "alertObject": { + "k8sApiObjects": [pod] + } + } +} + +# Fails if workload exceeds CPU-limit or request +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[wl.kind] + container := 
wl.spec.template.spec.containers[i] + + request_or_limit_cpu(container) + resource := "resources.requests.cpu" + cpu_req := container.resources.requests.cpu + is_req_exceeded_cpu(cpu_req) + + failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v exceeds CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [failed_paths], + "failedPaths": [failed_paths], + "fixPaths": [], + "alertObject": { + "k8sApiObjects": [wl] + } + } +} + +# Fails if cronjob doas exceeds CPU-limit or request +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + + request_or_limit_cpu(container) + resource := "resources.requests.cpu" + cpu_req := container.resources.requests.cpu + is_req_exceeded_cpu(cpu_req) + + failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + msga := { + "alertMessage": sprintf("Container: %v in %v: %v exceeds CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [failed_paths], + "failedPaths": [failed_paths], + "fixPaths": [], + "alertObject": { + "k8sApiObjects": [wl] + } + } +} ################################################################################################################# diff --git a/rules/resources-memory-limit-and-request/raw.rego b/rules/resources-memory-limit-and-request/raw.rego index a82209c01..13e5dfaa2 100644 --- a/rules/resources-memory-limit-and-request/raw.rego +++ b/rules/resources-memory-limit-and-request/raw.rego @@ -1,6 +1,6 @@ package armo_builtins -# ================================== memory limits ================================== +# ================================== no memory limits ================================== # Fails if pod does not have container with memory-limits deny[msga] { pod := input[_] @@ -56,7 +56,7 @@ deny[msga] { } } -# ================================== memory requests ================================== +# ================================== no memory requests ================================== # Fails if pod does not have container with memory requests deny[msga] { pod := input[_] @@ -117,21 +117,22 @@ request_or_limit_memory(container) { container.resources.requests.memory } -###################################################################################################### +# ============================================= memory requests exceed min/max ============================================= -# Fails if pod exceeds memory-limit or request +# Fails if pod exceeds memory request deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] request_or_limit_memory(container) - resource := is_min_max_exceeded_memory(container) - resource != "" + memory_req := container.resources.requests.memory + is_req_exceeded_memory(memory_req) + resource := "resources.requests.memory" failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), resource]) msga := { - "alertMessage": sprintf("Container: %v exceeds memory-limit or request", [container.name]), + "alertMessage": sprintf("Container: %v exceeds memory request", [container.name]), "packagename": "armo_builtins", "alertScore": 7, "reviewPaths": [failed_paths], @@ -141,7 +142,7 @@ deny[msga] { } } -# Fails if workload exceeds memory-limit or request +# 
Fails if workload exceeds memory request deny[msga] { wl := input[_] spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} @@ -149,13 +150,14 @@ deny[msga] { container := wl.spec.template.spec.containers[i] request_or_limit_memory(container) - resource := is_min_max_exceeded_memory(container) - resource != "" + memory_req := container.resources.requests.memory + is_req_exceeded_memory(memory_req) + resource := "resources.requests.memory" failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) msga := { - "alertMessage": sprintf("Container: %v in %v: %v exceeds memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "alertMessage": sprintf("Container: %v in %v: %v exceeds memory request", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, "reviewPaths": [failed_paths], @@ -165,20 +167,21 @@ deny[msga] { } } -# Fails if cronjob exceeds memory-limit or request +# Fails if cronjob exceeds memory request deny[msga] { wl := input[_] wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] request_or_limit_memory(container) - resource := is_min_max_exceeded_memory(container) - resource != "" + memory_req := container.resources.requests.memory + is_req_exceeded_memory(memory_req) + resource := "resources.requests.memory" failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) msga := { - "alertMessage": sprintf("Container: %v in %v: %v exceeds memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "alertMessage": sprintf("Container: %v in %v: %v exceeds memory request", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, "reviewPaths": [failed_paths], @@ -188,15 +191,82 @@ deny[msga] { } } -###################################################################################################### +# ============================================= memory limits exceed min/max ============================================= -is_min_max_exceeded_memory(container) = "resources.limits.memory" { +# Fails if pod exceeds memory-limit +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + request_or_limit_memory(container) memory_limit := container.resources.limits.memory is_limit_exceeded_memory(memory_limit) -} else = "resources.requests.memory" { - memory_req := container.resources.requests.memory - is_req_exceeded_memory(memory_req) -} else = "" + resource := "resources.limits.memory" + + failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), resource]) + + msga := { + "alertMessage": sprintf("Container: %v exceeds memory-limit ", [container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [failed_paths], + "failedPaths": [failed_paths], + "fixPaths": [], + "alertObject": {"k8sApiObjects": [pod]}, + } +} + +# Fails if workload exceeds memory-limit +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + + request_or_limit_memory(container) + memory_limit := container.resources.limits.memory + is_limit_exceeded_memory(memory_limit) + resource := "resources.limits.memory" + + failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + + msga := { + 
"alertMessage": sprintf("Container: %v in %v: %v exceeds memory-limit", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [failed_paths], + "failedPaths": [failed_paths], + "fixPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +# Fails if cronjob exceeds memory-limit +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + + request_or_limit_memory(container) + memory_limit := container.resources.limits.memory + is_limit_exceeded_memory(memory_limit) + resource := "resources.limits.memory" + + failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v exceeds memory-limit", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [failed_paths], + "failedPaths": [failed_paths], + "fixPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +###################################################################################################### + is_limit_exceeded_memory(memory_limit) { is_min_limit_exceeded_memory(memory_limit) From 574ce4ab5f2e83db654f93be47cb37dcd7444dc0 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 17 Oct 2023 17:57:51 +0300 Subject: [PATCH 022/195] add test cases, var name change Signed-off-by: YiscahLevySilas1 --- .../resources-cpu-limit-and-request/raw.rego | 24 ++++----- .../test/workload-exceeded/data.json | 8 +++ .../test/workload-exceeded/expected.json | 50 +++++++++++++++++++ .../workload-exceeded/input/deployment.yaml | 28 +++++++++++ .../raw.rego | 24 ++++----- .../test/workload-exceeded/data.json | 8 +++ .../test/workload-exceeded/expected.json | 50 +++++++++++++++++++ .../workload-exceeded/input/deployment.yaml | 28 +++++++++++ 8 files changed, 196 insertions(+), 24 deletions(-) create mode 100644 rules/resources-cpu-limit-and-request/test/workload-exceeded/data.json create mode 100644 rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json create mode 100644 rules/resources-cpu-limit-and-request/test/workload-exceeded/input/deployment.yaml create mode 100644 rules/resources-memory-limit-and-request/test/workload-exceeded/data.json create mode 100644 rules/resources-memory-limit-and-request/test/workload-exceeded/expected.json create mode 100644 rules/resources-memory-limit-and-request/test/workload-exceeded/input/deployment.yaml diff --git a/rules/resources-cpu-limit-and-request/raw.rego b/rules/resources-cpu-limit-and-request/raw.rego index 3cfbac051..34e1e7707 100644 --- a/rules/resources-cpu-limit-and-request/raw.rego +++ b/rules/resources-cpu-limit-and-request/raw.rego @@ -140,11 +140,11 @@ deny[msga] { pod.kind == "Pod" container := pod.spec.containers[i] request_or_limit_cpu(container) - resource := "resources.limits.cpu" + path := "resources.limits.cpu" cpu_limit := container.resources.limits.cpu is_limit_exceeded_cpu(cpu_limit) - failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v exceeds CPU-limit or request", [ container.name]), @@ -167,11 +167,11 @@ deny[msga] { container := wl.spec.template.spec.containers[i] request_or_limit_cpu(container) - resource := "resources.limits.cpu" + path := "resources.limits.cpu" cpu_limit := 
container.resources.limits.cpu is_limit_exceeded_cpu(cpu_limit) - failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v in %v: %v exceeds CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), @@ -193,11 +193,11 @@ deny[msga] { container = wl.spec.jobTemplate.spec.template.spec.containers[i] request_or_limit_cpu(container) - resource := "resources.limits.cpu" + path := "resources.limits.cpu" cpu_limit := container.resources.limits.cpu is_limit_exceeded_cpu(cpu_limit) - failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v in %v: %v exceeds CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), @@ -220,11 +220,11 @@ deny[msga] { pod.kind == "Pod" container := pod.spec.containers[i] request_or_limit_cpu(container) - resource := "resources.requests.cpu" + path := "resources.requests.cpu" cpu_req := container.resources.requests.cpu is_req_exceeded_cpu(cpu_req) - failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v exceeds CPU-limit or request", [ container.name]), @@ -247,11 +247,11 @@ deny[msga] { container := wl.spec.template.spec.containers[i] request_or_limit_cpu(container) - resource := "resources.requests.cpu" + path := "resources.requests.cpu" cpu_req := container.resources.requests.cpu is_req_exceeded_cpu(cpu_req) - failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v in %v: %v exceeds CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), @@ -273,11 +273,11 @@ deny[msga] { container = wl.spec.jobTemplate.spec.template.spec.containers[i] request_or_limit_cpu(container) - resource := "resources.requests.cpu" + path := "resources.requests.cpu" cpu_req := container.resources.requests.cpu is_req_exceeded_cpu(cpu_req) - failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v in %v: %v exceeds CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), diff --git a/rules/resources-cpu-limit-and-request/test/workload-exceeded/data.json b/rules/resources-cpu-limit-and-request/test/workload-exceeded/data.json new file mode 100644 index 000000000..e164ddbba --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/workload-exceeded/data.json @@ -0,0 +1,8 @@ +{ + "postureControlInputs": { + "cpu_request_max": ["300m"], + "cpu_request_min": ["300m"], + "cpu_limit_max": ["300m"], + "cpu_limit_min": ["300m"] + } +} \ No newline at end of file diff --git a/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json b/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json new file mode 100644 index 000000000..b704104d4 --- /dev/null +++ 
b/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json @@ -0,0 +1,50 @@ +[ + { + "alertMessage": "Container: log-aggregator in Deployment: test exceeds CPU-limit or request", + "failedPaths": [ + "spec.template.spec.containers[0].resources.limits.cpu" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + }, + { + "alertMessage": "Container: log-aggregator in Deployment: test exceeds CPU-limit or request", + "failedPaths": [ + "spec.template.spec.containers[0].resources.requests.cpu" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-limit-and-request/test/workload-exceeded/input/deployment.yaml b/rules/resources-cpu-limit-and-request/test/workload-exceeded/input/deployment.yaml new file mode 100644 index 000000000..94cdb4770 --- /dev/null +++ b/rules/resources-cpu-limit-and-request/test/workload-exceeded/input/deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test + namespace: default + labels: + purpose: demonstrate-command +spec: + selector: + matchLabels: + purpose: demonstrate-command + template: + metadata: + labels: + purpose: demonstrate-command + spec : + containers : + - + name : log-aggregator + image : images.my-company.example/log-aggregator:v6 + resources : + requests : + memory : "64Mi" + cpu : "250m" + limits : + memory : "128Mi" + cpu : "500m" + \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/raw.rego b/rules/resources-memory-limit-and-request/raw.rego index 13e5dfaa2..6d99edd4b 100644 --- a/rules/resources-memory-limit-and-request/raw.rego +++ b/rules/resources-memory-limit-and-request/raw.rego @@ -127,9 +127,9 @@ deny[msga] { request_or_limit_memory(container) memory_req := container.resources.requests.memory is_req_exceeded_memory(memory_req) - resource := "resources.requests.memory" + path := "resources.requests.memory" - failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v exceeds memory request", [container.name]), @@ -152,9 +152,9 @@ deny[msga] { request_or_limit_memory(container) memory_req := container.resources.requests.memory is_req_exceeded_memory(memory_req) - resource := "resources.requests.memory" + path := "resources.requests.memory" - failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v in %v: %v exceeds memory request", [container.name, wl.kind, wl.metadata.name]), @@ -176,9 +176,9 @@ deny[msga] { request_or_limit_memory(container) memory_req := container.resources.requests.memory is_req_exceeded_memory(memory_req) - resource := "resources.requests.memory" + path := "resources.requests.memory" - failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", 
[format_int(i, 10), resource]) + failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v in %v: %v exceeds memory request", [container.name, wl.kind, wl.metadata.name]), @@ -201,9 +201,9 @@ deny[msga] { request_or_limit_memory(container) memory_limit := container.resources.limits.memory is_limit_exceeded_memory(memory_limit) - resource := "resources.limits.memory" + path := "resources.limits.memory" - failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v exceeds memory-limit ", [container.name]), @@ -226,9 +226,9 @@ deny[msga] { request_or_limit_memory(container) memory_limit := container.resources.limits.memory is_limit_exceeded_memory(memory_limit) - resource := "resources.limits.memory" + path := "resources.limits.memory" - failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.template.spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v in %v: %v exceeds memory-limit", [container.name, wl.kind, wl.metadata.name]), @@ -250,9 +250,9 @@ deny[msga] { request_or_limit_memory(container) memory_limit := container.resources.limits.memory is_limit_exceeded_memory(memory_limit) - resource := "resources.limits.memory" + path := "resources.limits.memory" - failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), resource]) + failed_paths := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].%v", [format_int(i, 10), path]) msga := { "alertMessage": sprintf("Container: %v in %v: %v exceeds memory-limit", [container.name, wl.kind, wl.metadata.name]), diff --git a/rules/resources-memory-limit-and-request/test/workload-exceeded/data.json b/rules/resources-memory-limit-and-request/test/workload-exceeded/data.json new file mode 100644 index 000000000..276227e9e --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/workload-exceeded/data.json @@ -0,0 +1,8 @@ +{ + "postureControlInputs": { + "memory_request_max": ["300Mi"], + "memory_request_min": ["300Mi"], + "memory_limit_max": ["300Mi"], + "memory_limit_min": ["300Mi"] + } +} \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/workload-exceeded/expected.json b/rules/resources-memory-limit-and-request/test/workload-exceeded/expected.json new file mode 100644 index 000000000..fb8d24b50 --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/workload-exceeded/expected.json @@ -0,0 +1,50 @@ +[ + { + "alertMessage": "Container: log-aggregator in Deployment: test exceeds memory request", + "failedPaths": [ + "spec.template.spec.containers[0].resources.requests.memory" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + }, + { + "alertMessage": "Container: log-aggregator in Deployment: test exceeds memory-limit", + "failedPaths": [ + "spec.template.spec.containers[0].resources.limits.memory" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + 
"apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/workload-exceeded/input/deployment.yaml b/rules/resources-memory-limit-and-request/test/workload-exceeded/input/deployment.yaml new file mode 100644 index 000000000..208339fff --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/workload-exceeded/input/deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test + namespace: default + labels: + purpose: demonstrate-command +spec: + selector: + matchLabels: + purpose: demonstrate-command + template: + metadata: + labels: + purpose: demonstrate-command + spec : + containers : + - + name : log-aggregator + image : images.my-company.example/log-aggregator:v6 + resources : + requests : + memory : "64Mi" + cpu : "250m" + limits : + memory : "328Mi" + cpu : "500m" + \ No newline at end of file From db58d4410b011d305a1a391b8044b35659ef62a7 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Wed, 18 Oct 2023 09:04:37 +0300 Subject: [PATCH 023/195] Update upload-readme.py Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- scripts/upload-readme.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/scripts/upload-readme.py b/scripts/upload-readme.py index 4848a813b..d351f224e 100644 --- a/scripts/upload-readme.py +++ b/scripts/upload-readme.py @@ -121,11 +121,16 @@ def update_doc(self, doc_slug: str, order: any, title: str, body: str, category: return r.json() +# function is validating if the structure is validated and return an error if missing some objects. 
+# NOTE: objects might be changed from time to time, need to update accordingly def validate_readme_structure(readmeapi : ReadmeApi): categories = readmeapi.get_categories() - filtered_categories = list(filter(lambda c: c['title'] == 'Controls',categories)) + filtered_categories = list(filter(lambda c: c['title'] == 'Review Controls',categories)) + print(categories) + if len(filtered_categories) != 1: - raise Exception('Readme structure validation failure: missing "Controls" category (or more than one)') + raise Exception('Readme structure validation failure: missing "Review Controls" category (or more than one)') + controls_category = filtered_categories[0] docs_in_control_category = readmeapi.get_docs_in_category(controls_category['slug']) filtered_docs = list(filter(lambda d: d['title'] == 'Controls',docs_in_control_category)) @@ -134,9 +139,9 @@ def validate_readme_structure(readmeapi : ReadmeApi): def get_document_for_control(readmeapi : ReadmeApi, control): categories = readmeapi.get_categories() - filtered_categories = list(filter(lambda c: c['title'] == 'Controls',categories)) + filtered_categories = list(filter(lambda c: c['title'] == 'Review Controls',categories)) if len(filtered_categories) != 1: - raise Exception('Readme structure failure: missing "Controls" category (or more than one)') + raise Exception('Readme structure failure: missing "Review Controls" category (or more than one)') controls_category = filtered_categories[0] docs_in_control_category = readmeapi.get_docs_in_category(controls_category['slug']) filtered_docs = list(filter(lambda d: d['title'].startswith(control['id']),docs_in_control_category)) @@ -511,4 +516,3 @@ def get_controls_doc_slugs(readmeapi: ReadmeApi) -> list: if __name__ == '__main__': main() - From 7f892297d3ba14cc9fac270ccfc687278db6530a Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 18 Oct 2023 12:34:38 +0300 Subject: [PATCH 024/195] separate limit and request cases Signed-off-by: YiscahLevySilas1 --- .../resources-cpu-limit-and-request/raw.rego | 11 ---- .../test/workload-exceeded/data.json | 8 +-- .../test/workload-exceeded/expected.json | 25 ++++--- .../workload-exceeded/input/deployment.yaml | 65 ++++++++++++++----- 4 files changed, 62 insertions(+), 47 deletions(-) diff --git a/rules/resources-cpu-limit-and-request/raw.rego b/rules/resources-cpu-limit-and-request/raw.rego index 34e1e7707..114962d76 100644 --- a/rules/resources-cpu-limit-and-request/raw.rego +++ b/rules/resources-cpu-limit-and-request/raw.rego @@ -139,7 +139,6 @@ deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] - request_or_limit_cpu(container) path := "resources.limits.cpu" cpu_limit := container.resources.limits.cpu is_limit_exceeded_cpu(cpu_limit) @@ -166,7 +165,6 @@ deny[msga] { spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] - request_or_limit_cpu(container) path := "resources.limits.cpu" cpu_limit := container.resources.limits.cpu is_limit_exceeded_cpu(cpu_limit) @@ -192,7 +190,6 @@ deny[msga] { wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] - request_or_limit_cpu(container) path := "resources.limits.cpu" cpu_limit := container.resources.limits.cpu is_limit_exceeded_cpu(cpu_limit) @@ -219,7 +216,6 @@ deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] - request_or_limit_cpu(container) path := "resources.requests.cpu" cpu_req := container.resources.requests.cpu is_req_exceeded_cpu(cpu_req) @@ -246,7 +242,6 @@ 
deny[msga] { spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] - request_or_limit_cpu(container) path := "resources.requests.cpu" cpu_req := container.resources.requests.cpu is_req_exceeded_cpu(cpu_req) @@ -272,7 +267,6 @@ deny[msga] { wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] - request_or_limit_cpu(container) path := "resources.requests.cpu" cpu_req := container.resources.requests.cpu is_req_exceeded_cpu(cpu_req) @@ -295,11 +289,6 @@ deny[msga] { ################################################################################################################# -request_or_limit_cpu(container) { - container.resources.limits.cpu - container.resources.requests.cpu -} - is_min_max_exceeded_cpu(container) = "resources.limits.cpu" { cpu_limit := container.resources.limits.cpu diff --git a/rules/resources-cpu-limit-and-request/test/workload-exceeded/data.json b/rules/resources-cpu-limit-and-request/test/workload-exceeded/data.json index e164ddbba..0ee80232d 100644 --- a/rules/resources-cpu-limit-and-request/test/workload-exceeded/data.json +++ b/rules/resources-cpu-limit-and-request/test/workload-exceeded/data.json @@ -1,8 +1,8 @@ { "postureControlInputs": { - "cpu_request_max": ["300m"], - "cpu_request_min": ["300m"], - "cpu_limit_max": ["300m"], - "cpu_limit_min": ["300m"] + "cpu_request_max": ["50m"], + "cpu_request_min": ["50m"], + "cpu_limit_max": ["50m"], + "cpu_limit_min": ["50m"] } } \ No newline at end of file diff --git a/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json b/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json index b704104d4..ee7dd0f9e 100644 --- a/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json +++ b/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json @@ -1,10 +1,13 @@ [ { - "alertMessage": "Container: log-aggregator in Deployment: test exceeds CPU-limit or request", - "failedPaths": [ - "spec.template.spec.containers[0].resources.limits.cpu" + "alertMessage": "Container: health-check in Deployment: health-check-deployment does not have CPU-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.requests.cpu", + "value": "YOUR_VALUE" + } ], - "fixPaths": [], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, @@ -14,19 +17,16 @@ "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": { - "purpose": "demonstrate-command" - }, - "name": "test" + "name": "health-check-deployment" } } ] } }, { - "alertMessage": "Container: log-aggregator in Deployment: test exceeds CPU-limit or request", + "alertMessage": "Container: health-check in Deployment: health-check-deployment exceeds CPU-limit or request", "failedPaths": [ - "spec.template.spec.containers[0].resources.requests.cpu" + "spec.template.spec.containers[0].resources.limits.cpu" ], "fixPaths": [], "ruleStatus": "", @@ -38,10 +38,7 @@ "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": { - "purpose": "demonstrate-command" - }, - "name": "test" + "name": "health-check-deployment" } } ] diff --git a/rules/resources-cpu-limit-and-request/test/workload-exceeded/input/deployment.yaml b/rules/resources-cpu-limit-and-request/test/workload-exceeded/input/deployment.yaml index 94cdb4770..4c09ee1d5 100644 --- a/rules/resources-cpu-limit-and-request/test/workload-exceeded/input/deployment.yaml +++ 
b/rules/resources-cpu-limit-and-request/test/workload-exceeded/input/deployment.yaml @@ -1,28 +1,57 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: test + annotations: + deployment.kubernetes.io/revision: '1' + creationTimestamp: '2023-10-17T12:50:59Z' + generation: 1 + name: health-check-deployment namespace: default - labels: - purpose: demonstrate-command + resourceVersion: '1383' + uid: 405080f2-c98e-450e-8e74-9f7e73a9c421 spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 selector: matchLabels: - purpose: demonstrate-command + app: health-check + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate template: metadata: + creationTimestamp: null labels: - purpose: demonstrate-command - spec : - containers : - - - name : log-aggregator - image : images.my-company.example/log-aggregator:v6 - resources : - requests : - memory : "64Mi" - cpu : "250m" - limits : - memory : "128Mi" - cpu : "500m" - \ No newline at end of file + app: health-check + spec: + containers: + - image: madhuakula/k8s-goat-health-check + imagePullPolicy: Always + name: health-check + ports: + - containerPort: 80 + protocol: TCP + resources: + limits: + cpu: 80m + memory: 100Mi + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /custom/docker/docker.sock + name: docker-sock-volume + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - hostPath: + path: /var/run/docker.sock + type: Socket + name: docker-sock-volume From ad0448ad878334cf8ef53bf5640f14f4211662fe Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Wed, 18 Oct 2023 18:22:38 +0300 Subject: [PATCH 025/195] use cluster scope instead of aks Signed-off-by: David Wertenteil --- controls/C-0239-preferusingdedicatedaksserviceaccounts.json | 2 +- .../C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json | 2 +- controls/C-0241-useazurerbacforkubernetesauthorization.json | 2 +- controls/C-0242-hostilemultitenantworkloads.json | 2 +- ...ingusingazuredefenderimagescanningorathirdpartyprovider.json | 2 +- controls/C-0244-ensurekubernetessecretsareencrypted.json | 2 +- ...5-encrypttraffictohttpsloadbalancerswithtlscertificates.json | 2 +- controls/C-0247-restrictaccesstothecontrolplaneendpoint.json | 2 +- controls/C-0248-ensureclustersarecreatedwithprivatenodes.json | 2 +- controls/C-0249-restrictuntrustedworkloads.json | 2 +- ...mizeclusteraccesstoreadonlyforazurecontainerregistryacr.json | 2 +- ...reatedwithprivateendpointenabledandpublicaccessdisabled.json | 2 +- controls/C-0254-enableauditlogs.json | 2 +- frameworks/cis-aks-t1.2.0.json | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/controls/C-0239-preferusingdedicatedaksserviceaccounts.json b/controls/C-0239-preferusingdedicatedaksserviceaccounts.json index a2159db1f..bf35e66d9 100644 --- a/controls/C-0239-preferusingdedicatedaksserviceaccounts.json +++ b/controls/C-0239-preferusingdedicatedaksserviceaccounts.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json b/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json index 5faee94ea..05f47cd82 100644 --- a/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json +++ 
b/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json @@ -19,7 +19,7 @@ "default_value": "By default, Network Policy is disabled.", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0241-useazurerbacforkubernetesauthorization.json b/controls/C-0241-useazurerbacforkubernetesauthorization.json index c49ccdac6..e6c1b427e 100644 --- a/controls/C-0241-useazurerbacforkubernetesauthorization.json +++ b/controls/C-0241-useazurerbacforkubernetesauthorization.json @@ -17,7 +17,7 @@ "default_value": "", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0242-hostilemultitenantworkloads.json b/controls/C-0242-hostilemultitenantworkloads.json index 62e98a3bb..57ba789a3 100644 --- a/controls/C-0242-hostilemultitenantworkloads.json +++ b/controls/C-0242-hostilemultitenantworkloads.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json b/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json index daa7c6d2e..807ae9e27 100644 --- a/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json +++ b/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json @@ -19,7 +19,7 @@ "default_value": "Images are not scanned by Default.", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0244-ensurekubernetessecretsareencrypted.json b/controls/C-0244-ensurekubernetessecretsareencrypted.json index 3bb263573..ba9532a2b 100644 --- a/controls/C-0244-ensurekubernetessecretsareencrypted.json +++ b/controls/C-0244-ensurekubernetessecretsareencrypted.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json b/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json index d2c800b01..88f969af4 100644 --- a/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json +++ b/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json b/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json index b9d730671..4dafa07fc 100644 --- a/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json +++ b/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json @@ -19,7 +19,7 @@ "default_value": "By default, Endpoint Private Access is disabled.", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json b/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json index aca15749d..e2e652624 100644 --- a/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json +++ b/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git 
a/controls/C-0249-restrictuntrustedworkloads.json b/controls/C-0249-restrictuntrustedworkloads.json index e2c5d3123..9baf820a7 100644 --- a/controls/C-0249-restrictuntrustedworkloads.json +++ b/controls/C-0249-restrictuntrustedworkloads.json @@ -20,7 +20,7 @@ "default_value": "ACI is not a default component of the AKS", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json b/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json index 674e4c2b6..0244eb587 100644 --- a/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json +++ b/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } diff --git a/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json b/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json index 15d9a5c14..649461739 100644 --- a/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json +++ b/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/controls/C-0254-enableauditlogs.json b/controls/C-0254-enableauditlogs.json index eb9ef642f..399f3458b 100644 --- a/controls/C-0254-enableauditlogs.json +++ b/controls/C-0254-enableauditlogs.json @@ -19,7 +19,7 @@ "default_value": "By default, cluster control plane logs aren't sent to be Logged.", "scanningScope": { "matches": [ - "AKS" + "cluster" ] } } \ No newline at end of file diff --git a/frameworks/cis-aks-t1.2.0.json b/frameworks/cis-aks-t1.2.0.json index 7eac30cec..a3d83ccef 100644 --- a/frameworks/cis-aks-t1.2.0.json +++ b/frameworks/cis-aks-t1.2.0.json @@ -7,7 +7,7 @@ }, "scanningScope": { "matches": [ - "AKS" + "cluster" ] }, "typeTags": ["compliance"], From 8f42515c6ceafdb9c58baf3549ff7b8acc2b7ebe Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 19 Oct 2023 13:39:45 +0300 Subject: [PATCH 026/195] separate limit and request cases Signed-off-by: YiscahLevySilas1 --- rules/resources-memory-limit-and-request/raw.rego | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/rules/resources-memory-limit-and-request/raw.rego b/rules/resources-memory-limit-and-request/raw.rego index 6d99edd4b..799c80831 100644 --- a/rules/resources-memory-limit-and-request/raw.rego +++ b/rules/resources-memory-limit-and-request/raw.rego @@ -112,10 +112,6 @@ deny[msga] { } } -request_or_limit_memory(container) { - container.resources.limits.memory - container.resources.requests.memory -} # ============================================= memory requests exceed min/max ============================================= @@ -124,7 +120,6 @@ deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] - request_or_limit_memory(container) memory_req := container.resources.requests.memory is_req_exceeded_memory(memory_req) path := "resources.requests.memory" @@ -149,7 +144,6 @@ deny[msga] { spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] - request_or_limit_memory(container) memory_req := container.resources.requests.memory is_req_exceeded_memory(memory_req) path := "resources.requests.memory" @@ -173,7 
+167,6 @@ deny[msga] { wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] - request_or_limit_memory(container) memory_req := container.resources.requests.memory is_req_exceeded_memory(memory_req) path := "resources.requests.memory" @@ -198,7 +191,6 @@ deny[msga] { pod := input[_] pod.kind == "Pod" container := pod.spec.containers[i] - request_or_limit_memory(container) memory_limit := container.resources.limits.memory is_limit_exceeded_memory(memory_limit) path := "resources.limits.memory" @@ -223,7 +215,6 @@ deny[msga] { spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] - request_or_limit_memory(container) memory_limit := container.resources.limits.memory is_limit_exceeded_memory(memory_limit) path := "resources.limits.memory" @@ -247,7 +238,6 @@ deny[msga] { wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] - request_or_limit_memory(container) memory_limit := container.resources.limits.memory is_limit_exceeded_memory(memory_limit) path := "resources.limits.memory" From 153a9db22d1b622910e1c041ccca711525771572 Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Mon, 23 Oct 2023 11:43:58 +0300 Subject: [PATCH 027/195] Revert "Use cluster scope instead of AKS" --- controls/C-0239-preferusingdedicatedaksserviceaccounts.json | 2 +- .../C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json | 2 +- controls/C-0241-useazurerbacforkubernetesauthorization.json | 2 +- controls/C-0242-hostilemultitenantworkloads.json | 2 +- ...ingusingazuredefenderimagescanningorathirdpartyprovider.json | 2 +- controls/C-0244-ensurekubernetessecretsareencrypted.json | 2 +- ...5-encrypttraffictohttpsloadbalancerswithtlscertificates.json | 2 +- controls/C-0247-restrictaccesstothecontrolplaneendpoint.json | 2 +- controls/C-0248-ensureclustersarecreatedwithprivatenodes.json | 2 +- controls/C-0249-restrictuntrustedworkloads.json | 2 +- ...mizeclusteraccesstoreadonlyforazurecontainerregistryacr.json | 2 +- ...reatedwithprivateendpointenabledandpublicaccessdisabled.json | 2 +- controls/C-0254-enableauditlogs.json | 2 +- frameworks/cis-aks-t1.2.0.json | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/controls/C-0239-preferusingdedicatedaksserviceaccounts.json b/controls/C-0239-preferusingdedicatedaksserviceaccounts.json index bf35e66d9..a2159db1f 100644 --- a/controls/C-0239-preferusingdedicatedaksserviceaccounts.json +++ b/controls/C-0239-preferusingdedicatedaksserviceaccounts.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json b/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json index 05f47cd82..5faee94ea 100644 --- a/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json +++ b/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json @@ -19,7 +19,7 @@ "default_value": "By default, Network Policy is disabled.", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0241-useazurerbacforkubernetesauthorization.json b/controls/C-0241-useazurerbacforkubernetesauthorization.json index e6c1b427e..c49ccdac6 100644 --- a/controls/C-0241-useazurerbacforkubernetesauthorization.json +++ b/controls/C-0241-useazurerbacforkubernetesauthorization.json @@ -17,7 +17,7 @@ "default_value": "", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ 
No newline at end of file diff --git a/controls/C-0242-hostilemultitenantworkloads.json b/controls/C-0242-hostilemultitenantworkloads.json index 57ba789a3..62e98a3bb 100644 --- a/controls/C-0242-hostilemultitenantworkloads.json +++ b/controls/C-0242-hostilemultitenantworkloads.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json b/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json index 807ae9e27..daa7c6d2e 100644 --- a/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json +++ b/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json @@ -19,7 +19,7 @@ "default_value": "Images are not scanned by Default.", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0244-ensurekubernetessecretsareencrypted.json b/controls/C-0244-ensurekubernetessecretsareencrypted.json index ba9532a2b..3bb263573 100644 --- a/controls/C-0244-ensurekubernetessecretsareencrypted.json +++ b/controls/C-0244-ensurekubernetessecretsareencrypted.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json b/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json index 88f969af4..d2c800b01 100644 --- a/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json +++ b/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json b/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json index 4dafa07fc..b9d730671 100644 --- a/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json +++ b/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json @@ -19,7 +19,7 @@ "default_value": "By default, Endpoint Private Access is disabled.", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json b/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json index e2e652624..aca15749d 100644 --- a/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json +++ b/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0249-restrictuntrustedworkloads.json b/controls/C-0249-restrictuntrustedworkloads.json index 9baf820a7..e2c5d3123 100644 --- a/controls/C-0249-restrictuntrustedworkloads.json +++ b/controls/C-0249-restrictuntrustedworkloads.json @@ -20,7 +20,7 @@ "default_value": "ACI is not a default component of the AKS", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json b/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json index 0244eb587..674e4c2b6 100644 --- 
a/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json +++ b/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } diff --git a/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json b/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json index 649461739..15d9a5c14 100644 --- a/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json +++ b/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json @@ -19,7 +19,7 @@ "default_value": "", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/controls/C-0254-enableauditlogs.json b/controls/C-0254-enableauditlogs.json index 399f3458b..eb9ef642f 100644 --- a/controls/C-0254-enableauditlogs.json +++ b/controls/C-0254-enableauditlogs.json @@ -19,7 +19,7 @@ "default_value": "By default, cluster control plane logs aren't sent to be Logged.", "scanningScope": { "matches": [ - "cluster" + "AKS" ] } } \ No newline at end of file diff --git a/frameworks/cis-aks-t1.2.0.json b/frameworks/cis-aks-t1.2.0.json index a3d83ccef..7eac30cec 100644 --- a/frameworks/cis-aks-t1.2.0.json +++ b/frameworks/cis-aks-t1.2.0.json @@ -7,7 +7,7 @@ }, "scanningScope": { "matches": [ - "cluster" + "AKS" ] }, "typeTags": ["compliance"], From ec5e92f3975e53c6b357d9903e8fccdf019f4a85 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 24 Oct 2023 10:19:05 +0300 Subject: [PATCH 028/195] Update pr-comments.yaml Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- .github/workflows/pr-comments.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr-comments.yaml b/.github/workflows/pr-comments.yaml index 6a1c10390..a394fecb6 100644 --- a/.github/workflows/pr-comments.yaml +++ b/.github/workflows/pr-comments.yaml @@ -1,8 +1,9 @@ name: PR Comment Trigger on: - issue_comment: - types: [created] + workflow_call: + # issue_comment: + # types: [created] jobs: job01: if: ${{ github.event.issue.pull_request }} From bcf6663c9dc48b173fdd89bfa2c9d1c736005df2 Mon Sep 17 00:00:00 2001 From: Meital Rudnitsky Date: Tue, 31 Oct 2023 15:40:26 +0200 Subject: [PATCH 029/195] replace serviceaccountname with serviceAccountName Signed-off-by: Meital Rudnitsky --- rules/rule-access-dashboard-wl-v1/raw.rego | 12 ++++++------ .../test/cronjob/expected.json | 3 ++- .../test/pod/expected.json | 3 ++- .../test/workload/expected.json | 3 ++- rules/rule-access-dashboard/raw.rego | 12 ++++++------ 5 files changed, 18 insertions(+), 15 deletions(-) diff --git a/rules/rule-access-dashboard-wl-v1/raw.rego b/rules/rule-access-dashboard-wl-v1/raw.rego index d3191a423..48dd06c76 100644 --- a/rules/rule-access-dashboard-wl-v1/raw.rego +++ b/rules/rule-access-dashboard-wl-v1/raw.rego @@ -14,8 +14,8 @@ deny[msga] { "packagename": "armo_builtins", "alertScore": 7, "fixPaths": [], - "deletePaths": ["spec.serviceaccountname"], - "failedPaths": ["spec.serviceaccountname"], + "deletePaths": ["spec.serviceAccountName"], + "failedPaths": ["spec.serviceAccountName"], "alertObject": { "k8sApiObjects": [pod] } @@ -36,8 +36,8 @@ deny[msga] { msga := { "alertMessage": sprintf("%v: %v is associated with dashboard service account", [wl.kind, wl.metadata.name]), "packagename": 
"armo_builtins", - "deletePaths": ["spec.template.spec.serviceaccountname"], - "failedPaths": ["spec.template.spec.serviceaccountname"], + "deletePaths": ["spec.template.spec.serviceAccountName"], + "failedPaths": ["spec.template.spec.serviceAccountName"], "alertScore": 7, "fixPaths": [], "alertObject": { @@ -61,8 +61,8 @@ deny[msga] { "packagename": "armo_builtins", "alertScore": 7, "fixPaths": [], - "deletePaths": ["spec.jobTemplate.spec.template.spec.serviceaccountname"], - "failedPaths": ["spec.jobTemplate.spec.template.spec.serviceaccountname"], + "deletePaths": ["spec.jobTemplate.spec.template.spec.serviceAccountName"], + "failedPaths": ["spec.jobTemplate.spec.template.spec.serviceAccountName"], "alertObject": { "k8sApiObjects": [wl] } diff --git a/rules/rule-access-dashboard-wl-v1/test/cronjob/expected.json b/rules/rule-access-dashboard-wl-v1/test/cronjob/expected.json index 4e9b7fc03..9819413ae 100644 --- a/rules/rule-access-dashboard-wl-v1/test/cronjob/expected.json +++ b/rules/rule-access-dashboard-wl-v1/test/cronjob/expected.json @@ -1,6 +1,7 @@ [{ "alertMessage": "the following cronjob: hello is associated with dashboard service account", - "failedPaths": ["spec.jobTemplate.spec.template.spec.serviceaccountname"], + "failedPaths": ["spec.jobTemplate.spec.template.spec.serviceAccountName"], + "deletePaths": ["spec.jobTemplate.spec.template.spec.serviceAccountName"], "fixPaths": [], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/rule-access-dashboard-wl-v1/test/pod/expected.json b/rules/rule-access-dashboard-wl-v1/test/pod/expected.json index 54165477b..f3cea907d 100644 --- a/rules/rule-access-dashboard-wl-v1/test/pod/expected.json +++ b/rules/rule-access-dashboard-wl-v1/test/pod/expected.json @@ -1,6 +1,7 @@ [{ "alertMessage": "the following pods: frontend are associated with dashboard service account", - "failedPaths": ["spec.serviceaccountname"], + "failedPaths": ["spec.serviceAccountName"], + "deletePaths": ["spec.serviceAccountName"], "fixPaths": [], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/rule-access-dashboard-wl-v1/test/workload/expected.json b/rules/rule-access-dashboard-wl-v1/test/workload/expected.json index facb5b185..e9feb3edd 100644 --- a/rules/rule-access-dashboard-wl-v1/test/workload/expected.json +++ b/rules/rule-access-dashboard-wl-v1/test/workload/expected.json @@ -1,6 +1,7 @@ [{ "alertMessage": "Deployment: test is associated with dashboard service account", - "failedPaths": ["spec.template.spec.serviceaccountname"], + "failedPaths": ["spec.template.spec.serviceAccountName"], + "deletePaths": ["spec.template.spec.serviceAccountName"], "fixPaths": [], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/rule-access-dashboard/raw.rego b/rules/rule-access-dashboard/raw.rego index 1b5b94867..228aaaaff 100644 --- a/rules/rule-access-dashboard/raw.rego +++ b/rules/rule-access-dashboard/raw.rego @@ -58,9 +58,9 @@ deny[msga] { deny[msga] { pod := input[_] - pod.spec.serviceaccountname == "kubernetes-dashboard" + pod.spec.serviceAccountName == "kubernetes-dashboard" not startswith(pod.metadata.name, "kubernetes-dashboard") - path := "spec.serviceaccountname" + path := "spec.serviceAccountName" msga := { "alertMessage": sprintf("the following pods: %s are associated with dashboard service account", [pod.metadata.name]), "packagename": "armo_builtins", @@ -81,9 +81,9 @@ deny[msga] { wl := input[_] spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} 
spec_template_spec_patterns[wl.kind] - wl.spec.template.spec.serviceaccountname == "kubernetes-dashboard" + wl.spec.template.spec.serviceAccountName == "kubernetes-dashboard" not startswith(wl.metadata.name, "kubernetes-dashboard") - path := "spec.template.spec.serviceaccountname" + path := "spec.template.spec.serviceAccountName" msga := { "alertMessage": sprintf("%v: %v is associated with dashboard service account", [wl.kind, wl.metadata.name]), "packagename": "armo_builtins", @@ -103,9 +103,9 @@ deny[msga] { deny[msga] { wl := input[_] wl.kind == "CronJob" - wl.spec.jobTemplate.spec.template.spec.serviceaccountname == "kubernetes-dashboard" + wl.spec.jobTemplate.spec.template.spec.serviceAccountName == "kubernetes-dashboard" not startswith(wl.metadata.name, "kubernetes-dashboard") - path := "spec.jobTemplate.spec.template.spec.serviceaccountname" + path := "spec.jobTemplate.spec.template.spec.serviceAccountName" msga := { "alertMessage": sprintf("the following cronjob: %s is associated with dashboard service account", [wl.metadata.name]), "packagename": "armo_builtins", From 7a807db113bbe63d5e162c6a0d38034d10d2fffd Mon Sep 17 00:00:00 2001 From: Matthias Bertschy Date: Fri, 20 Oct 2023 09:47:02 +0200 Subject: [PATCH 030/195] adding maintainers file Signed-off-by: Matthias Bertschy --- MAINTAINERS.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 MAINTAINERS.md diff --git a/MAINTAINERS.md b/MAINTAINERS.md new file mode 100644 index 000000000..cbcacdb74 --- /dev/null +++ b/MAINTAINERS.md @@ -0,0 +1,12 @@ +# Maintainers + +The following table lists the project core maintainers: + +| Name | GitHub | Organization | Added/Renewed On | +| --- | --- | --- |------------------| +| [Yiscah Levy Silas](https://www.linkedin.com/in/yiscah-levy-silas/) | [@YiscahLevySilas1](https://github.com/YiscahLevySilas1) | [ARMO](https://www.armosec.io/) | 2021-09-01 | +| [Daniel Grunberger](https://www.linkedin.com/in/daniel-grunberger-719685188/) | [@Daniel-GrunbergerCA](https://github.com/Daniel-GrunbergerCA) | [ARMO](https://www.armosec.io/) | 2021-09-01 | +| [Yuval Leibovich](https://www.linkedin.com/in/yuval-leibovich-42ab9661/) | [@yuleib](https://github.com/yuleib) | [ARMO](https://www.armosec.io/) | 2022-11-01 | +| [Alessio Greggi](https://www.linkedin.com/in/alegrey91/) | [@alegrey91](https://github.com/alegrey91) | [ARMO](https://www.armosec.io/) | 2023-02-01 | +| [Ben Hirschberg](https://www.linkedin.com/in/benyamin-ben-hirschberg-66141890) | [@slashben](https://github.com/slashben) | [ARMO](https://www.armosec.io/) | 2021-09-01 | +| [David Wertenteil](https://www.linkedin.com/in/david-wertenteil-0ba277b9) | [@dwertent](https://github.com/dwertent) | [ARMO](https://www.armosec.io/) | 2021-09-01 | From 4b0886532119911f70065e7491414fffc3a41ea1 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Mon, 6 Nov 2023 08:46:05 +0200 Subject: [PATCH 031/195] Update pr-tests.yaml Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- .github/workflows/pr-tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 9ad180434..b507ea281 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -6,7 +6,7 @@ on: # run for every chnage in the PR types: [ opened, synchronize, reopened, ready_for_review ] # Do not run the pipeline if only Markdown files changed - paths-ignore: ['**.md'] + # paths-ignore: ['**.md'] 
concurrency: group: ${{ github.workflow }}-${{ github.ref }} From c81929444ed62abfca05e1d922b4856e27121d09 Mon Sep 17 00:00:00 2001 From: kooomix Date: Mon, 20 Nov 2023 08:02:01 +0200 Subject: [PATCH 032/195] marking smartremediation controls Signed-off-by: kooomix --- controls/C-0016-allowprivilegeescalation.json | 3 ++- controls/C-0017-immutablecontainerfilesystem.json | 3 ++- controls/C-0034-automaticmappingofserviceaccount.json | 3 ++- controls/C-0045-writablehostpathmount.json | 3 ++- controls/C-0046-insecurecapabilities.json | 3 ++- controls/C-0048-hostpathmount.json | 3 ++- controls/C-0057-privilegedcontainer.json | 3 ++- controls/C-0074-containersmountingdockersocket.json | 3 ++- go.mod | 4 ++-- go.sum | 8 ++++---- 10 files changed, 22 insertions(+), 14 deletions(-) diff --git a/controls/C-0016-allowprivilegeescalation.json b/controls/C-0016-allowprivilegeescalation.json index 75c50947f..c6fcd7665 100644 --- a/controls/C-0016-allowprivilegeescalation.json +++ b/controls/C-0016-allowprivilegeescalation.json @@ -4,7 +4,8 @@ "armoBuiltin": true, "controlTypeTags": [ "security", - "compliance" + "compliance", + "smartRemediation" ] }, "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", diff --git a/controls/C-0017-immutablecontainerfilesystem.json b/controls/C-0017-immutablecontainerfilesystem.json index 63f47a664..45b21bd7f 100644 --- a/controls/C-0017-immutablecontainerfilesystem.json +++ b/controls/C-0017-immutablecontainerfilesystem.json @@ -4,7 +4,8 @@ "armoBuiltin": true, "controlTypeTags": [ "security", - "compliance" + "compliance", + "smartRemediation" ], "attackTracks": [ { diff --git a/controls/C-0034-automaticmappingofserviceaccount.json b/controls/C-0034-automaticmappingofserviceaccount.json index a6a37ad32..bcea7277f 100644 --- a/controls/C-0034-automaticmappingofserviceaccount.json +++ b/controls/C-0034-automaticmappingofserviceaccount.json @@ -4,7 +4,8 @@ "armoBuiltin": true, "controlTypeTags": [ "security", - "compliance" + "compliance", + "smartRemediation" ] }, "description": "Potential attacker may gain access to a pod and steal its service account token. 
Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", diff --git a/controls/C-0045-writablehostpathmount.json b/controls/C-0045-writablehostpathmount.json index 44199b0cb..046a39d12 100644 --- a/controls/C-0045-writablehostpathmount.json +++ b/controls/C-0045-writablehostpathmount.json @@ -10,7 +10,8 @@ "security", "compliance", "devops", - "security-impact" + "security-impact", + "smartRemediation" ], "attackTracks": [ { diff --git a/controls/C-0046-insecurecapabilities.json b/controls/C-0046-insecurecapabilities.json index 2ca8ee8d0..6602ac291 100644 --- a/controls/C-0046-insecurecapabilities.json +++ b/controls/C-0046-insecurecapabilities.json @@ -5,7 +5,8 @@ "armoBuiltin": true, "controlTypeTags": [ "security", - "compliance" + "compliance", + "smartRemediation" ], "attackTracks": [ { diff --git a/controls/C-0048-hostpathmount.json b/controls/C-0048-hostpathmount.json index 9c84c0e44..ca4927470 100644 --- a/controls/C-0048-hostpathmount.json +++ b/controls/C-0048-hostpathmount.json @@ -7,7 +7,8 @@ ], "controlTypeTags": [ "security", - "compliance" + "compliance", + "smartRemediation" ], "attackTracks": [ { diff --git a/controls/C-0057-privilegedcontainer.json b/controls/C-0057-privilegedcontainer.json index 4189c7554..6cb4a2074 100644 --- a/controls/C-0057-privilegedcontainer.json +++ b/controls/C-0057-privilegedcontainer.json @@ -6,7 +6,8 @@ "Privilege escalation" ], "controlTypeTags": [ - "security" + "security", + "smartRemediation" ] }, "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", diff --git a/controls/C-0074-containersmountingdockersocket.json b/controls/C-0074-containersmountingdockersocket.json index 9c952d1e6..4551f3c32 100644 --- a/controls/C-0074-containersmountingdockersocket.json +++ b/controls/C-0074-containersmountingdockersocket.json @@ -3,7 +3,8 @@ "attributes": { "armoBuiltin": true, "controlTypeTags": [ - "devops" + "devops", + "smartRemediation" ] }, "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. 
This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", diff --git a/go.mod b/go.mod index 242866559..7ae162e49 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/kubescape/regolibrary go 1.19 require ( - github.com/armosec/armoapi-go v0.0.211 + github.com/armosec/armoapi-go v0.0.256 github.com/go-gota/gota v0.12.0 - github.com/kubescape/opa-utils v0.0.263 + github.com/kubescape/opa-utils v0.0.272-0.20231120060016-d4326e009f3d github.com/stretchr/testify v1.8.4 go.uber.org/zap v1.24.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b diff --git a/go.sum b/go.sum index 2bd4a5642..6a2bd841c 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/armosec/armoapi-go v0.0.211 h1:OS4D56sfoaU7T6FOCyrufE2Ttdzv9tP7MZkFxh82ll0= -github.com/armosec/armoapi-go v0.0.211/go.mod h1:4AEdwBrbS1YCAn/lZzV+cOOR9BPa0MTHYHiJDlR1uRQ= +github.com/armosec/armoapi-go v0.0.256 h1:eV8WWQ1r+2D0KHhLA6ux6lx67+uqkYe/uVHrOUFqz5c= +github.com/armosec/armoapi-go v0.0.256/go.mod h1:CJT5iH5VF30zjdQYXaQhsAm8IEHtM1T87HcFVXeLX54= github.com/armosec/gojay v1.2.15 h1:sSB2vnAvacUNkw9nzUYZKcPzhJOyk6/5LK2JCNdmoZY= github.com/armosec/gojay v1.2.15/go.mod h1:vzVAaay2TWJAngOpxu8aqLbye9jMgoKleuAOK+xsOts= github.com/armosec/utils-go v0.0.20 h1:bvr+TMumEYdMsGFGSsaQysST7K02nNROFvuajNuKPlw= @@ -348,8 +348,8 @@ github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525 h1:9wzR38Le github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525/go.mod h1:Al+yTE+vemECb/Myn2G9+2o2uFmMtphbkQmxf4OEHxE= github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847 h1:GGuS6pE6KGa5q7j9fkRN3p1eQw16/jLUMnPR8FT3O6M= github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847/go.mod h1:eBd6few7RYplnNNlHoe6d7jMmoE6Kx1emapJ91euBbY= -github.com/kubescape/opa-utils v0.0.263 h1:ZK9ubreFqjvwB0C3iCRWTmLvtvZmQ4ivcxsqJ4URbW8= -github.com/kubescape/opa-utils v0.0.263/go.mod h1:0Be6E+vHqjavl/JneqgyC+oXOdfs6s+V6YnFvBkIAsA= +github.com/kubescape/opa-utils v0.0.272-0.20231120060016-d4326e009f3d h1:PaZBtZauunzlfbgXMuBzCJ02gdGEXADkej63p8lZ0VA= +github.com/kubescape/opa-utils v0.0.272-0.20231120060016-d4326e009f3d/go.mod h1:VmplJnkhei6mDna+6z183k/HX6GOPgsXiwIlDW8mhKw= github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw= github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= From 69ced2be74d0fb812cfc700e20bb82b4d4b7de93 Mon Sep 17 00:00:00 2001 From: kooomix Date: Mon, 20 Nov 2023 08:55:46 +0200 Subject: [PATCH 033/195] fix go mod Signed-off-by: kooomix --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7ae162e49..8990fe76c 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/armosec/armoapi-go v0.0.256 github.com/go-gota/gota v0.12.0 - github.com/kubescape/opa-utils v0.0.272-0.20231120060016-d4326e009f3d + github.com/kubescape/opa-utils v0.0.263 github.com/stretchr/testify v1.8.4 go.uber.org/zap v1.24.0 
k8s.io/utils v0.0.0-20230726121419-3b25d923346b diff --git a/go.sum b/go.sum index 6a2bd841c..b2260e3e5 100644 --- a/go.sum +++ b/go.sum @@ -348,8 +348,8 @@ github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525 h1:9wzR38Le github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525/go.mod h1:Al+yTE+vemECb/Myn2G9+2o2uFmMtphbkQmxf4OEHxE= github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847 h1:GGuS6pE6KGa5q7j9fkRN3p1eQw16/jLUMnPR8FT3O6M= github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847/go.mod h1:eBd6few7RYplnNNlHoe6d7jMmoE6Kx1emapJ91euBbY= -github.com/kubescape/opa-utils v0.0.272-0.20231120060016-d4326e009f3d h1:PaZBtZauunzlfbgXMuBzCJ02gdGEXADkej63p8lZ0VA= -github.com/kubescape/opa-utils v0.0.272-0.20231120060016-d4326e009f3d/go.mod h1:VmplJnkhei6mDna+6z183k/HX6GOPgsXiwIlDW8mhKw= +github.com/kubescape/opa-utils v0.0.263 h1:ZK9ubreFqjvwB0C3iCRWTmLvtvZmQ4ivcxsqJ4URbW8= +github.com/kubescape/opa-utils v0.0.263/go.mod h1:0Be6E+vHqjavl/JneqgyC+oXOdfs6s+V6YnFvBkIAsA= github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw= github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= From 295e78f7ab8ff3f2a383a2440eb294ffc222bb84 Mon Sep 17 00:00:00 2001 From: kooomix Date: Mon, 20 Nov 2023 11:15:33 +0200 Subject: [PATCH 034/195] fix control C-0021 Signed-off-by: kooomix --- rules/exposed-sensitive-interfaces-v1/raw.rego | 4 ++-- rules/exposed-sensitive-interfaces-v1/test/pod/expected.json | 2 +- .../test/workloads/expected.json | 2 +- .../test/workloads2/expected.json | 2 +- rules/exposed-sensitive-interfaces/raw.rego | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rules/exposed-sensitive-interfaces-v1/raw.rego b/rules/exposed-sensitive-interfaces-v1/raw.rego index b606343bc..ea0d2b665 100644 --- a/rules/exposed-sensitive-interfaces-v1/raw.rego +++ b/rules/exposed-sensitive-interfaces-v1/raw.rego @@ -121,10 +121,10 @@ deny[msga] { wl_connectedto_service(wl, service) = paths{ count({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector) - paths = ["spec.selector.matchLabels", "service.spec.selector"] + paths = ["spec.selector.matchLabels", "spec.selector"] } wl_connectedto_service(wl, service) = paths { wl.spec.selector.matchLabels == service.spec.selector - paths = ["spec.selector.matchLabels", "service.spec.selector"] + paths = ["spec.selector.matchLabels", "spec.selector"] } \ No newline at end of file diff --git a/rules/exposed-sensitive-interfaces-v1/test/pod/expected.json b/rules/exposed-sensitive-interfaces-v1/test/pod/expected.json index 0a1bcdf1b..0f4ee56ca 100644 --- a/rules/exposed-sensitive-interfaces-v1/test/pod/expected.json +++ b/rules/exposed-sensitive-interfaces-v1/test/pod/expected.json @@ -1,6 +1,6 @@ [{ "alertMessage": "service: my-service is exposed", - "failedPaths": ["spec.selector.matchLabels", "service.spec.selector"], + "failedPaths": ["spec.selector.matchLabels", "spec.selector"], "fixPaths": [], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/exposed-sensitive-interfaces-v1/test/workloads/expected.json b/rules/exposed-sensitive-interfaces-v1/test/workloads/expected.json index ee046f52c..8f68ec332 100644 --- a/rules/exposed-sensitive-interfaces-v1/test/workloads/expected.json +++ b/rules/exposed-sensitive-interfaces-v1/test/workloads/expected.json @@ -1,6 +1,6 @@ [{ "alertMessage": 
"service: my-service is exposed", - "failedPaths": ["spec.selector.matchLabels", "service.spec.selector"], + "failedPaths": ["spec.selector.matchLabels", "spec.selector"], "fixPaths": [], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/exposed-sensitive-interfaces-v1/test/workloads2/expected.json b/rules/exposed-sensitive-interfaces-v1/test/workloads2/expected.json index ccaa9857f..0c1bc3f4a 100644 --- a/rules/exposed-sensitive-interfaces-v1/test/workloads2/expected.json +++ b/rules/exposed-sensitive-interfaces-v1/test/workloads2/expected.json @@ -3,7 +3,7 @@ "alertMessage": "service: jenkins-service is exposed", "failedPaths": [ "spec.selector.matchLabels", - "service.spec.selector" + "spec.selector" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/exposed-sensitive-interfaces/raw.rego b/rules/exposed-sensitive-interfaces/raw.rego index f33b9dcf8..2dccc002d 100644 --- a/rules/exposed-sensitive-interfaces/raw.rego +++ b/rules/exposed-sensitive-interfaces/raw.rego @@ -104,10 +104,10 @@ deny[msga] { wl_connectedto_service(wl, service) = paths{ count({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector) - paths = ["spec.selector.matchLabels", "service.spec.selector"] + paths = ["spec.selector.matchLabels","spec.selector"] } wl_connectedto_service(wl, service) = paths { wl.spec.selector.matchLabels == service.spec.selector - paths = ["spec.selector.matchLabels", "service.spec.selector"] + paths = ["spec.selector.matchLabels", "spec.selector"] } From af602a5a12dd0d6e5a711f28816fea895df2cbd5 Mon Sep 17 00:00:00 2001 From: kooomix Date: Tue, 21 Nov 2023 14:38:57 +0200 Subject: [PATCH 035/195] fix deletePaths and reviewPaths tests Signed-off-by: kooomix --- go.mod | 2 +- go.sum | 4 +- go.work | 5 - .../CVE-2021-25741/test/cronjob/expected.json | 2 + rules/CVE-2021-25741/test/pod/expected.json | 2 + .../test/workloads/expected.json | 1 + .../deployment-bad-image-name/expected.json | 3 + .../test/deployment-config-map/expected.json | 3 + rules/CVE-2022-0185/test/test/expected.json | 1 + rules/CVE-2022-0492/raw.rego | 6 +- .../test/cap_dac_override_fail/expected.json | 1 + .../test/no_new_privs_fail/expected.json | 1 + .../test/root_user_fail/expected.json | 1 + .../test/deployment/expected.json | 6 ++ .../alert-any-hostpath/test/pod/expected.json | 3 + .../test/failed/expected.json | 1 + .../test/deployment_eks_failed/expected.json | 6 ++ .../test/pod_eks_failed/expected.json | 3 + .../test/deployment/expected.json | 2 + .../test/fail/expected.json | 1 + .../invalid-config-no-value/expected.json | 3 + .../test/invalid-config-value/expected.json | 3 + .../test/both-mount-default/expected.json | 1 + .../test/both-mount/expected.json | 1 + .../test/sa-mount/expected.json | 1 + .../test/both-mount-default/expected.json | 1 + .../test/both-mount/expected.json | 1 + .../test/sa-mount/expected.json | 1 + .../expected.json | 14 +++ .../test/configmap/expected.json | 3 + .../test/cronjob/expected.json | 3 + .../container-hostPort/test/pod/expected.json | 3 + .../test/cronjob-failed/expected.json | 3 + .../test/pod-failed/expected.json | 3 + .../test/workload-failed/expected.json | 3 + .../test/cronjob-containerd/expected.json | 3 + .../test/cronjob-crio/expected.json | 3 + .../test/cronjob/expected.json | 3 + .../test/pod-containerd/expected.json | 3 + .../test/pod-crio/expected.json | 3 + .../test/pod/expected.json | 3 + .../test/workloads-containerd/expected.json | 3 + .../test/workloads-crio/expected.json | 3 + .../test/workloads/expected.json 
| 3 + .../test/csistoragecapacity/expected.json | 3 + .../test/cronjob/expected.json | 2 + .../test/pod/expected.json | 2 + .../test/workloads/expected.json | 2 + .../failed_ingress_tls_not_set/expected.json | 1 + .../test/endpoints/expected.json | 3 + .../test/endpointslice/expected.json | 3 + .../raw.rego | 2 + .../expected.json | 1 + .../test/fail-config-only/expected.json | 4 + .../fail-config-sensor-failed/expected.json | 1 + .../failed_clusterrolebinding/expected.json | 1 + .../expected.json | 1 + .../test/failed/expected.json | 98 ++++++++++++++++++- .../test/failed/expected.json | 64 +++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 64 +++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 36 ++++++- .../test/failed/expected.json | 64 +++++++++++- .../test/failed/expected.json | 64 +++++++++++- .../test/failed/expected.json | 4 + .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 64 +++++++++++- .../test/failed/expected.json | 6 ++ .../test/failed/expected.json | 32 +++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 69 ++++++++++++- .../test/failed/expected.json | 32 +++++- .../test/failed/expected.json | 32 +++++- .../test/failed/expected.json | 94 +++++++++++++++++- .../test/failed/expected.json | 32 +++++- .../test/failed/expected.json | 94 +++++++++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 31 +++++- .../test/failed/expected.json | 31 +++++- .../test/failed/expected.json | 64 +++++++++++- .../test/failed/expected.json | 94 +++++++++++++++++- .../test/failed/expected.json | 64 +++++++++++- .../test/failed/expected.json | 36 ++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 32 +++++- .../test/failed/expected.json | 32 +++++- .../test/failed/expected.json | 64 +++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../test/failed/expected.json | 65 +++++++++++- .../fail-argument-set-to-true/expected.json | 4 +- .../fail-argument-set-to-false/expected.json | 3 + .../test/fail-missing-argument/expected.json | 1 + .../fail-argument-set-to-true/expected.json | 3 + .../fail-argument-set-false/expected.json | 3 + .../test/fail-missing-argument/expected.json | 1 + .../fail-missing-cert-argument/expected.json | 3 + .../fail-missing-key-argument/expected.json | 3 + .../fail-missing-cert-argument/expected.json | 1 + .../fail-missing-key-argument/expected.json | 1 + .../test/fail-same-key-file/expected.json | 4 + .../test/test-failed/expected.json | 1 + .../test/clusterrole/expected.json | 1 + .../test/role/expected.json | 1 + .../test/test-failed/expected.json | 1 + .../test/pod/expected.json | 1 + .../test/workloads/expected.json | 1 + .../test/workloads2/expected.json | 4 + .../test/failed_with_ingress/expected.json | 3 + .../expected.json | 3 + .../expected.json | 3 + .../horizontalpodautoscaler/expected.json | 3 + .../test/cronjob/expected.json | 3 + .../test/pod/expected.json | 3 + .../test/workloads/expected.json | 3 + .../test/cronjob/expected.json | 1 + .../test/pod/expected.json | 1 + .../test/workload/expected.json | 2 + .../test/cronjob/expected.json | 4 + .../test/pod/expected.json | 4 + .../test/workload/expected.json | 4 + .../test/ingress/expected.json | 
3 + .../test/cronjob/expected.json | 3 + .../test/pod/expected.json | 4 + .../test/workloads/expected.json | 3 + .../test/test/expected.json | 3 + .../test/test-failed/expected.json | 3 + .../test/test-failed/expected.json | 3 + .../raw.rego | 3 + .../test/fail-no-cli-and-config/expected.json | 1 + .../test/fail-sensor-failed/expected.json | 1 + .../test/invalid-cli-argument/expected.json | 1 + .../test/invalid-config-value/expected.json | 3 + rules/kubelet-event-qps/raw.rego | 1 + .../expected.json | 3 + .../test/fail-sensor-failed/expected.json | 1 + rules/kubelet-ip-tables/raw.rego | 2 + .../test/fail-sensor-failed/expected.json | 1 + .../test/fail-set-via-cli/expected.json | 1 + .../test/fail-set-via-config/expected.json | 3 + .../kubelet-protect-kernel-defaults/raw.rego | 3 + .../test/deny-config-file-false/expected.json | 3 + .../test/fail-no-config-and-cli/expected.json | 1 + .../test/fail-set-via-cli/expected.json | 1 + rules/kubelet-rotate-certificates/raw.rego | 2 + .../fail-cli-argument-set-false/expected.json | 1 + .../test/fail-sensor-failed/expected.json | 1 + .../expected.json | 3 + .../raw.rego | 2 + .../test/fail-config-file/expected.json | 3 + .../test/fail-sensor-failed/expected.json | 1 + .../test/fail-set-via-cli/expected.json | 1 + .../raw.rego | 2 + .../fail-cli-and-config-not-set/expected.json | 1 + .../test/fail-cli/expected.json | 1 + .../expected.json | 3 + .../test/lease/expected.json | 3 + .../test/cronjob-fixed-path/expected.json | 2 + .../test/cronjob/expected.json | 2 + .../test/deployment-fixed-path/expected.json | 1 + .../test/deployment/expected.json | 1 + .../test/pod/expected.json | 1 + .../test/persistentvolumeclaim/expected.json | 3 + .../test/poddisruptionbudget/expected.json | 3 + .../test/cronjob/expected.json | 1 + .../test/pod/expected.json | 1 + .../test/workload/expected.json | 3 + .../test/podtemplate/expected.json | 3 + .../test/fail-many-true/expected.json | 6 ++ .../test/fail-only-one-true/expected.json | 3 + .../test/fail-many-true/expected.json | 6 ++ .../test/fail-only-one-true/expected.json | 3 + .../test/fail-many-true/expected.json | 6 ++ .../test/fail-only-one-true/expected.json | 3 + .../test/fail-many-true/expected.json | 6 ++ .../test/fail-only-one-true/expected.json | 3 + .../test/fail-many-true/expected.json | 6 ++ .../test/fail-only-one-true/expected.json | 3 + .../test/fail-many-true/expected.json | 6 ++ .../test/fail-only-one-true/expected.json | 3 + .../test/fail-many-true/expected.json | 6 ++ .../test/fail-only-one-true/expected.json | 3 + .../test/test-failed/expected.json | 3 + .../test/failed/expected.json | 1 + .../test/config-fail/expected.json | 3 + .../test/replicationcontroller/expected.json | 3 + .../resources-cpu-limit-and-request/raw.rego | 6 ++ .../test/cronjob/expected.json | 2 + .../test/pod-only-limits/expected.json | 1 + .../test/pod-only-requests/expected.json | 1 + .../test/pod/expected.json | 2 + .../test/workload-exceeded/expected.json | 5 + .../test/workload/expected.json | 2 + .../test/workload-exceeded/expected.json | 6 ++ .../test/configmap/expected.json | 3 + .../raw.rego | 1 + .../test/failed/expected.json | 1 + .../test/role/expected.json | 1 + .../test/rolebinding/expected.json | 3 + .../expected.json | 4 + .../clusterrole-rolebinding/expected.json | 1 + .../test/role-rolebinding/expected.json | 1 + .../test/workload/expected.json | 2 +- .../test/cronjob/expected.json | 2 + .../test/pod/expected.json | 1 + .../test/workloads/expected.json | 2 + .../expected.json | 14 +++ 
.../test/role-rolebinding/expected.json | 15 +++ .../clusterrole-rolebinding/expected.json | 7 ++ .../test/role-rolebinding/expected.json | 1 + .../expected.json | 14 +++ .../clusterrole-rolebinding/expected.json | 1 + .../test/role-rolebinding/expected.json | 1 + .../expected.json | 14 +++ .../clusterrole-rolebinding/expected.json | 1 + .../test/role-rolebinding/expected.json | 1 + .../expected.json | 14 +++ .../clusterrole-rolebinding/expected.json | 1 + .../test/role-rolebinding/expected.json | 1 + .../expected.json | 14 +++ .../clusterrole-rolebinding/expected.json | 1 + .../test/role-rolebinding/expected.json | 1 + .../test/pod/expected.json | 1 + .../test/workloads/expected.json | 1 + .../expected.json | 14 +++ .../clusterrole-rolebinding/expected.json | 1 + .../test/role-rolebinding/expected.json | 1 + rules/rule-credentials-configmap/raw.rego | 2 +- .../test/test-base64/expected.json | 1 + .../test/test/expected.json | 2 + .../test/cronjob/expected.json | 3 + .../test/deployment/expected.json | 3 + .../test/pod/expected.json | 3 + .../test/workloads/expected.json | 3 + .../expected.json | 16 +++ .../clusterrole-rolebinding/expected.json | 1 + .../test/role-rolebinding/expected.json | 1 + .../test/cronjob/expected.json | 3 + .../test/workloads/expected.json | 3 + .../test/workloads/expected.json | 27 ++++- .../expected.json | 14 +++ .../clusterrole-rolebinding/expected.json | 1 + .../test/role-rolebinding/expected.json | 8 ++ .../test/cronjob/expected.json | 3 + .../test/pod/expected.json | 3 + .../test/workloads/expected.json | 3 + .../test/cronjob/expected.json | 3 + .../test/pod/expected.json | 3 + .../test/workloads/expected.json | 3 + .../test/gke/expected.json | 1 + .../test/service/expected.json | 3 + .../test/serviceaccount/expected.json | 3 + .../test/pod-mount-and-rb-bind/expected.json | 3 + .../test/cronjob/expected.json | 2 + .../set-fsgroup-value/test/pod/expected.json | 2 + .../test/workload/expected.json | 2 + .../test/cronjob/expected.json | 1 + .../test/pod/expected.json | 1 + .../test/workload/expected.json | 1 + .../test/cronjob/expected.json | 4 + .../test/pod/expected.json | 6 ++ .../test/workloads/expected.json | 3 + .../test/cronjob/expected.json | 1 + .../test/pod/expected.json | 1 + .../test/workload/expected.json | 1 + .../test/pod/expected.json | 3 + .../test/workloads/expected.json | 3 + .../test/failed_pod/expected.json | 3 + .../test/failed_pod_mounted/expected.json | 3 + .../test/failed/expected.json | 3 + testrunner/rego_test.go | 2 +- 275 files changed, 2747 insertions(+), 51 deletions(-) delete mode 100644 go.work diff --git a/go.mod b/go.mod index 8990fe76c..68c59d1c3 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/armosec/armoapi-go v0.0.256 github.com/go-gota/gota v0.12.0 - github.com/kubescape/opa-utils v0.0.263 + github.com/kubescape/opa-utils v0.0.272 github.com/stretchr/testify v1.8.4 go.uber.org/zap v1.24.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b diff --git a/go.sum b/go.sum index b2260e3e5..9d5fc8a1e 100644 --- a/go.sum +++ b/go.sum @@ -348,8 +348,8 @@ github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525 h1:9wzR38Le github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525/go.mod h1:Al+yTE+vemECb/Myn2G9+2o2uFmMtphbkQmxf4OEHxE= github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847 h1:GGuS6pE6KGa5q7j9fkRN3p1eQw16/jLUMnPR8FT3O6M= github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847/go.mod h1:eBd6few7RYplnNNlHoe6d7jMmoE6Kx1emapJ91euBbY= 
-github.com/kubescape/opa-utils v0.0.263 h1:ZK9ubreFqjvwB0C3iCRWTmLvtvZmQ4ivcxsqJ4URbW8= -github.com/kubescape/opa-utils v0.0.263/go.mod h1:0Be6E+vHqjavl/JneqgyC+oXOdfs6s+V6YnFvBkIAsA= +github.com/kubescape/opa-utils v0.0.272 h1:hqEuYGf/B2HuqbdVUtSsUGJopfXbQOgl3+KvFAu2Gd8= +github.com/kubescape/opa-utils v0.0.272/go.mod h1:VmplJnkhei6mDna+6z183k/HX6GOPgsXiwIlDW8mhKw= github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw= github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= diff --git a/go.work b/go.work deleted file mode 100644 index 1d56219a0..000000000 --- a/go.work +++ /dev/null @@ -1,5 +0,0 @@ -go 1.19 - -use ./testrunner -use . - diff --git a/rules/CVE-2021-25741/test/cronjob/expected.json b/rules/CVE-2021-25741/test/cronjob/expected.json index f90fd9157..5c26dbed8 100644 --- a/rules/CVE-2021-25741/test/cronjob/expected.json +++ b/rules/CVE-2021-25741/test/cronjob/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : mysql in CronJob : hello with subPath/subPathExpr", + "deletePaths": ["spec.jobTemplate.spec.template.spec.containers[0].volumeMounts[0].subPath"], "failedPaths": ["spec.jobTemplate.spec.template.spec.containers[0].volumeMounts[0].subPath"], "fixPaths": [], "ruleStatus": "", @@ -16,6 +17,7 @@ } }, { "alertMessage": "You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : php in CronJob : hello with subPath/subPathExpr", + "deletePaths": ["spec.jobTemplate.spec.template.spec.containers[1].volumeMounts[0].subPath"], "failedPaths": ["spec.jobTemplate.spec.template.spec.containers[1].volumeMounts[0].subPath"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/CVE-2021-25741/test/pod/expected.json b/rules/CVE-2021-25741/test/pod/expected.json index f8e5a0f59..127f35d27 100644 --- a/rules/CVE-2021-25741/test/pod/expected.json +++ b/rules/CVE-2021-25741/test/pod/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : mysql in pod : my-lamp-site with subPath/subPathExpr", + "deletePaths": ["spec.containers[0].volumeMounts[0].subPath"], "failedPaths": ["spec.containers[0].volumeMounts[0].subPath"], "fixPaths": [], "ruleStatus": "", @@ -16,6 +17,7 @@ } }, { "alertMessage": "You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : php in pod : my-lamp-site with subPath/subPathExpr", + "deletePaths": ["spec.containers[1].volumeMounts[0].subPath"], "failedPaths": ["spec.containers[1].volumeMounts[0].subPath"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/CVE-2021-25741/test/workloads/expected.json b/rules/CVE-2021-25741/test/workloads/expected.json index d684a63a6..53f9b9162 100644 --- a/rules/CVE-2021-25741/test/workloads/expected.json +++ b/rules/CVE-2021-25741/test/workloads/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : php in Deployment : my-deployment with subPath/subPathExpr", + "deletePaths": ["spec.template.spec.containers[1].volumeMounts[0].subPath"], "failedPaths": ["spec.template.spec.containers[1].volumeMounts[0].subPath"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/CVE-2021-25742/test/deployment-bad-image-name/expected.json b/rules/CVE-2021-25742/test/deployment-bad-image-name/expected.json index 8c276546a..2dd44131f 100644 --- a/rules/CVE-2021-25742/test/deployment-bad-image-name/expected.json +++ b/rules/CVE-2021-25742/test/deployment-bad-image-name/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "You may be vulnerable to CVE-2021-25742. Deployment test", + "reviewPaths": [ + "spec.template.spec.containers[0].image" + ], "failedPaths": [ "spec.template.spec.containers[0].image" ], diff --git a/rules/CVE-2021-25742/test/deployment-config-map/expected.json b/rules/CVE-2021-25742/test/deployment-config-map/expected.json index 8aafe02a9..fd3720c42 100644 --- a/rules/CVE-2021-25742/test/deployment-config-map/expected.json +++ b/rules/CVE-2021-25742/test/deployment-config-map/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "You may be vulnerable to CVE-2021-25742. Deployment test", + "reviewPaths": [ + "spec.template.spec.containers[0].image" + ], "failedPaths": [ "spec.template.spec.containers[0].image" ], diff --git a/rules/CVE-2022-0185/test/test/expected.json b/rules/CVE-2022-0185/test/test/expected.json index 9ee16a18a..3429acd00 100644 --- a/rules/CVE-2022-0185/test/test/expected.json +++ b/rules/CVE-2022-0185/test/test/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "You are vulnerable to CVE-2022-0185", + "reviewPaths": ["kernelVersion"], "failedPaths": ["kernelVersion"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/CVE-2022-0492/raw.rego b/rules/CVE-2022-0492/raw.rego index 64c2a42bb..13fd8f7f3 100644 --- a/rules/CVE-2022-0492/raw.rego +++ b/rules/CVE-2022-0492/raw.rego @@ -165,7 +165,7 @@ deny[msga] { "alertMessage": "You may be vulnerable to CVE-2022-0492", "packagename": "armo_builtins", "alertScore": 4, - "deletePaths": [result], + "reviewPaths": [result], "failedPaths": [result], "fixPaths": [], "alertObject": { @@ -197,7 +197,7 @@ deny[msga] { "alertMessage": "You may be vulnerable to CVE-2022-0492", "packagename": "armo_builtins", "alertScore": 4, - "deletePaths": [result], + "reviewPaths": [result], "failedPaths": [result], "fixPaths": [], "alertObject": { @@ -228,7 +228,7 @@ deny[msga] { "alertMessage": "You may be vulnerable to CVE-2022-0492", "packagename": "armo_builtins", "alertScore": 4, - "deletePaths": [result], + "reviewPaths": [result], "failedPaths": [result], "fixPaths": [], "alertObject": { diff --git a/rules/CVE-2022-0492/test/cap_dac_override_fail/expected.json b/rules/CVE-2022-0492/test/cap_dac_override_fail/expected.json index f595d6d0d..e37846e08 100644 --- a/rules/CVE-2022-0492/test/cap_dac_override_fail/expected.json +++ b/rules/CVE-2022-0492/test/cap_dac_override_fail/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "You may be vulnerable to CVE-2022-0492", + "reviewPaths": ["spec.template.spec.containers[0].securityContext.capabilities.add[0]"], "failedPaths": ["spec.template.spec.containers[0].securityContext.capabilities.add[0]"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/CVE-2022-0492/test/no_new_privs_fail/expected.json b/rules/CVE-2022-0492/test/no_new_privs_fail/expected.json index 24529d2a7..2face1fb8 100644 --- 
a/rules/CVE-2022-0492/test/no_new_privs_fail/expected.json +++ b/rules/CVE-2022-0492/test/no_new_privs_fail/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "You may be vulnerable to CVE-2022-0492", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsNonRoot", diff --git a/rules/CVE-2022-0492/test/root_user_fail/expected.json b/rules/CVE-2022-0492/test/root_user_fail/expected.json index 15a4c2acb..4ae347b67 100644 --- a/rules/CVE-2022-0492/test/root_user_fail/expected.json +++ b/rules/CVE-2022-0492/test/root_user_fail/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "You may be vulnerable to CVE-2022-0492", + "reviewPaths": ["spec.containers[0].securityContext.runAsUser"], "failedPaths": ["spec.containers[0].securityContext.runAsUser"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/alert-any-hostpath/test/deployment/expected.json b/rules/alert-any-hostpath/test/deployment/expected.json index 2e28d5105..7c9507a2d 100644 --- a/rules/alert-any-hostpath/test/deployment/expected.json +++ b/rules/alert-any-hostpath/test/deployment/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Deployment: my-deployment has: test-volume as hostPath volume", + "deletePaths": [ + "spec.template.spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.template.spec.volumes[0].hostPath.path" ], @@ -25,6 +28,9 @@ }, { "alertMessage": "Deployment: my-deployment has: test-volume2 as hostPath volume", + "deletePaths": [ + "spec.template.spec.volumes[1].hostPath.path" + ], "failedPaths": [ "spec.template.spec.volumes[1].hostPath.path" ], diff --git a/rules/alert-any-hostpath/test/pod/expected.json b/rules/alert-any-hostpath/test/pod/expected.json index a88f4447b..d93123451 100644 --- a/rules/alert-any-hostpath/test/pod/expected.json +++ b/rules/alert-any-hostpath/test/pod/expected.json @@ -1,5 +1,8 @@ { "alertMessage": "pod: test-pd has: test-volume as hostPath volume", + "deletePaths": [ + "spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.volumes[0].hostPath.path" ], diff --git a/rules/alert-container-optimized-os-not-in-use/test/failed/expected.json b/rules/alert-container-optimized-os-not-in-use/test/failed/expected.json index 225d80dfe..058b8af4a 100644 --- a/rules/alert-container-optimized-os-not-in-use/test/failed/expected.json +++ b/rules/alert-container-optimized-os-not-in-use/test/failed/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Prefer using Container-Optimized OS when possible", + "ReviewPaths": ["status.nodeInfo.osImage"], "failedPaths": ["status.nodeInfo.osImage"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/expected.json b/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/expected.json index bf74987c4..98f764d96 100644 --- a/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/expected.json +++ b/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Deployment: my-deployment has: test-volume as volume with potential credentials access.", + "deletePaths": [ + "spec.template.spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.template.spec.volumes[0].hostPath.path" ], @@ -25,6 +28,9 @@ }, { "alertMessage": "Deployment: my-deployment has: test-volume2 as volume with potential credentials access.", + "deletePaths": [ + "spec.template.spec.volumes[1].hostPath.path" + ], "failedPaths": [ 
"spec.template.spec.volumes[1].hostPath.path" ], diff --git a/rules/alert-mount-potential-credentials-paths/test/pod_eks_failed/expected.json b/rules/alert-mount-potential-credentials-paths/test/pod_eks_failed/expected.json index 588e86d77..4636386ac 100644 --- a/rules/alert-mount-potential-credentials-paths/test/pod_eks_failed/expected.json +++ b/rules/alert-mount-potential-credentials-paths/test/pod_eks_failed/expected.json @@ -1,5 +1,8 @@ { "alertMessage": "Pod: test-pd has: test-volume as volume with potential credentials access.", + "deletePaths": [ + "spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.volumes[0].hostPath.path" ], diff --git a/rules/alert-rw-hostpath/test/deployment/expected.json b/rules/alert-rw-hostpath/test/deployment/expected.json index 9781faf1d..79fdc914c 100644 --- a/rules/alert-rw-hostpath/test/deployment/expected.json +++ b/rules/alert-rw-hostpath/test/deployment/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Deployment: my-deployment has: test-volume as hostPath volume", + "deletePaths": ["spec.template.spec.containers[0].volumeMounts[0].readOnly"], "failedPaths": ["spec.template.spec.containers[0].volumeMounts[0].readOnly"], "fixPaths": [], "ruleStatus": "", @@ -19,6 +20,7 @@ } }, { "alertMessage": "Deployment: my-deployment has: test-volume as hostPath volume", + "deletePaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.template.spec.containers[0].volumeMounts[1].readOnly", diff --git a/rules/anonymous-access-enabled/test/fail/expected.json b/rules/anonymous-access-enabled/test/fail/expected.json index f3757ef2f..22406d5f3 100644 --- a/rules/anonymous-access-enabled/test/fail/expected.json +++ b/rules/anonymous-access-enabled/test/fail/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "the following RoleBinding: system:public-info-viewer gives permissions to anonymous users", + "deletePaths": ["subjects[1]"], "failedPaths": ["subjects[1]"], "fixPaths": null, "ruleStatus": "", diff --git a/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-no-value/expected.json b/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-no-value/expected.json index e8f1d1429..d89d4a6ed 100644 --- a/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-no-value/expected.json +++ b/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-no-value/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 7, + "reviewPaths": [ + "authentication.anonymous.enabled" + ], "failedPaths": [ "authentication.anonymous.enabled" ], diff --git a/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-value/expected.json b/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-value/expected.json index 28962d390..3e0dd71cd 100644 --- a/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-value/expected.json +++ b/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-value/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 7, + "reviewPaths": [ + "authentication.anonymous.enabled" + ], "failedPaths": [ "authentication.anonymous.enabled" ], diff --git a/rules/automount-default-service-account/test/both-mount-default/expected.json b/rules/automount-default-service-account/test/both-mount-default/expected.json index 58f508865..f5d3e170e 100644 --- a/rules/automount-default-service-account/test/both-mount-default/expected.json +++ b/rules/automount-default-service-account/test/both-mount-default/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "the following service account: default in the following 
namespace: default mounts service account tokens in pods by default", + "deletePaths": [], "failedPaths": [], "fixPaths": [{ "path": "automountServiceAccountToken", diff --git a/rules/automount-default-service-account/test/both-mount/expected.json b/rules/automount-default-service-account/test/both-mount/expected.json index 38d6f3e6c..a34721287 100644 --- a/rules/automount-default-service-account/test/both-mount/expected.json +++ b/rules/automount-default-service-account/test/both-mount/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "the following service account: default in the following namespace: default mounts service account tokens in pods by default", + "deletePaths": ["automountServiceAccountToken"], "failedPaths": ["automountServiceAccountToken"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/automount-default-service-account/test/sa-mount/expected.json b/rules/automount-default-service-account/test/sa-mount/expected.json index 38d6f3e6c..a34721287 100644 --- a/rules/automount-default-service-account/test/sa-mount/expected.json +++ b/rules/automount-default-service-account/test/sa-mount/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "the following service account: default in the following namespace: default mounts service account tokens in pods by default", + "deletePaths": ["automountServiceAccountToken"], "failedPaths": ["automountServiceAccountToken"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/automount-service-account/test/both-mount-default/expected.json b/rules/automount-service-account/test/both-mount-default/expected.json index 58f508865..8e717a17a 100644 --- a/rules/automount-service-account/test/both-mount-default/expected.json +++ b/rules/automount-service-account/test/both-mount-default/expected.json @@ -1,6 +1,7 @@ [{ "alertMessage": "the following service account: default in the following namespace: default mounts service account tokens in pods by default", "failedPaths": [], + "deletePaths": [], "fixPaths": [{ "path": "automountServiceAccountToken", "value": "false" diff --git a/rules/automount-service-account/test/both-mount/expected.json b/rules/automount-service-account/test/both-mount/expected.json index 38d6f3e6c..a34721287 100644 --- a/rules/automount-service-account/test/both-mount/expected.json +++ b/rules/automount-service-account/test/both-mount/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "the following service account: default in the following namespace: default mounts service account tokens in pods by default", + "deletePaths": ["automountServiceAccountToken"], "failedPaths": ["automountServiceAccountToken"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/automount-service-account/test/sa-mount/expected.json b/rules/automount-service-account/test/sa-mount/expected.json index 38d6f3e6c..a34721287 100644 --- a/rules/automount-service-account/test/sa-mount/expected.json +++ b/rules/automount-service-account/test/sa-mount/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "the following service account: default in the following namespace: default mounts service account tokens in pods by default", + "deletePaths": ["automountServiceAccountToken"], "failedPaths": ["automountServiceAccountToken"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/cluster-admin-role/test/clusterrole-clusterrolebinding/expected.json b/rules/cluster-admin-role/test/clusterrole-clusterrolebinding/expected.json index 1ef9b1b2a..455a17e54 100644 --- a/rules/cluster-admin-role/test/clusterrole-clusterrolebinding/expected.json +++ 
b/rules/cluster-admin-role/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: Group-dev is bound to cluster-admin role", + "deletePaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[1]", @@ -71,6 +78,13 @@ }, { "alertMessage": "Subject: Group-manager is bound to cluster-admin role", + "deletePaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[1]", diff --git a/rules/configmap-in-default-namespace/test/configmap/expected.json b/rules/configmap-in-default-namespace/test/configmap/expected.json index 656beee22..e7cec61ce 100644 --- a/rules/configmap-in-default-namespace/test/configmap/expected.json +++ b/rules/configmap-in-default-namespace/test/configmap/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "ConfigMap: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/container-hostPort/test/cronjob/expected.json b/rules/container-hostPort/test/cronjob/expected.json index 9f52c82f7..30628ea84 100644 --- a/rules/container-hostPort/test/cronjob/expected.json +++ b/rules/container-hostPort/test/cronjob/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Container: influxdb in CronJob: hello has Host-port", + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].ports[1].hostPort" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[0].ports[1].hostPort" ], diff --git a/rules/container-hostPort/test/pod/expected.json b/rules/container-hostPort/test/pod/expected.json index cc444736a..1ee69de26 100644 --- a/rules/container-hostPort/test/pod/expected.json +++ b/rules/container-hostPort/test/pod/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Container: influxdb has Host-port", + "deletePaths": [ + "spec.containers[0].ports[0].hostPort" + ], "failedPaths": [ "spec.containers[0].ports[0].hostPort" ], diff --git a/rules/container-image-repository/test/cronjob-failed/expected.json b/rules/container-image-repository/test/cronjob-failed/expected.json index 85752a127..71c7be5e6 100644 --- a/rules/container-image-repository/test/cronjob-failed/expected.json +++ b/rules/container-image-repository/test/cronjob-failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "image 'influxdb' in container 'influxdb' comes from untrusted registry", + "reviewPaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].image" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[0].image" ], diff --git a/rules/container-image-repository/test/pod-failed/expected.json b/rules/container-image-repository/test/pod-failed/expected.json index 0aa9115a3..a58b50872 100644 --- a/rules/container-image-repository/test/pod-failed/expected.json +++ b/rules/container-image-repository/test/pod-failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "image 'debian' in container 'command-demo-container' comes from untrusted registry", + "reviewPaths": [ + "spec.containers[0].image" + ], "failedPaths": [ "spec.containers[0].image" ], diff 
--git a/rules/container-image-repository/test/workload-failed/expected.json b/rules/container-image-repository/test/workload-failed/expected.json index 9f714e929..fef767bd4 100644 --- a/rules/container-image-repository/test/workload-failed/expected.json +++ b/rules/container-image-repository/test/workload-failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "image 'k8s.gcb.io/goproxy:0.1' in container 'goproxy' comes from untrusted registry", + "reviewPaths": [ + "spec.template.spec.containers[0].image" + ], "failedPaths": [ "spec.template.spec.containers[0].image" ], diff --git a/rules/containers-mounting-docker-socket/test/cronjob-containerd/expected.json b/rules/containers-mounting-docker-socket/test/cronjob-containerd/expected.json index e68280d60..436eb3bfc 100644 --- a/rules/containers-mounting-docker-socket/test/cronjob-containerd/expected.json +++ b/rules/containers-mounting-docker-socket/test/cronjob-containerd/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume in CronJob: hello has mounting to Docker internals.", + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" ], diff --git a/rules/containers-mounting-docker-socket/test/cronjob-crio/expected.json b/rules/containers-mounting-docker-socket/test/cronjob-crio/expected.json index e68280d60..436eb3bfc 100644 --- a/rules/containers-mounting-docker-socket/test/cronjob-crio/expected.json +++ b/rules/containers-mounting-docker-socket/test/cronjob-crio/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume in CronJob: hello has mounting to Docker internals.", + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" ], diff --git a/rules/containers-mounting-docker-socket/test/cronjob/expected.json b/rules/containers-mounting-docker-socket/test/cronjob/expected.json index e68280d60..436eb3bfc 100644 --- a/rules/containers-mounting-docker-socket/test/cronjob/expected.json +++ b/rules/containers-mounting-docker-socket/test/cronjob/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume in CronJob: hello has mounting to Docker internals.", + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" ], diff --git a/rules/containers-mounting-docker-socket/test/pod-containerd/expected.json b/rules/containers-mounting-docker-socket/test/pod-containerd/expected.json index ee28e47f2..602a58262 100644 --- a/rules/containers-mounting-docker-socket/test/pod-containerd/expected.json +++ b/rules/containers-mounting-docker-socket/test/pod-containerd/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume in pod: test-pd has mounting to Docker internals.", + "deletePaths": [ + "spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.volumes[0].hostPath.path" ], diff --git a/rules/containers-mounting-docker-socket/test/pod-crio/expected.json b/rules/containers-mounting-docker-socket/test/pod-crio/expected.json index ee28e47f2..602a58262 100644 --- a/rules/containers-mounting-docker-socket/test/pod-crio/expected.json +++ b/rules/containers-mounting-docker-socket/test/pod-crio/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume in pod: test-pd has mounting to Docker internals.", + "deletePaths": [ + "spec.volumes[0].hostPath.path" + ], 
"failedPaths": [ "spec.volumes[0].hostPath.path" ], diff --git a/rules/containers-mounting-docker-socket/test/pod/expected.json b/rules/containers-mounting-docker-socket/test/pod/expected.json index ee28e47f2..602a58262 100644 --- a/rules/containers-mounting-docker-socket/test/pod/expected.json +++ b/rules/containers-mounting-docker-socket/test/pod/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume in pod: test-pd has mounting to Docker internals.", + "deletePaths": [ + "spec.volumes[0].hostPath.path" + ], "failedPaths": [ "spec.volumes[0].hostPath.path" ], diff --git a/rules/containers-mounting-docker-socket/test/workloads-containerd/expected.json b/rules/containers-mounting-docker-socket/test/workloads-containerd/expected.json index ebbe78199..e142cca61 100644 --- a/rules/containers-mounting-docker-socket/test/workloads-containerd/expected.json +++ b/rules/containers-mounting-docker-socket/test/workloads-containerd/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume2 in Deployment: my-deployment has mounting to Docker internals.", + "deletePaths": [ + "spec.template.spec.volumes[1].hostPath.path" + ], "failedPaths": [ "spec.template.spec.volumes[1].hostPath.path" ], diff --git a/rules/containers-mounting-docker-socket/test/workloads-crio/expected.json b/rules/containers-mounting-docker-socket/test/workloads-crio/expected.json index ebbe78199..e142cca61 100644 --- a/rules/containers-mounting-docker-socket/test/workloads-crio/expected.json +++ b/rules/containers-mounting-docker-socket/test/workloads-crio/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume2 in Deployment: my-deployment has mounting to Docker internals.", + "deletePaths": [ + "spec.template.spec.volumes[1].hostPath.path" + ], "failedPaths": [ "spec.template.spec.volumes[1].hostPath.path" ], diff --git a/rules/containers-mounting-docker-socket/test/workloads/expected.json b/rules/containers-mounting-docker-socket/test/workloads/expected.json index ebbe78199..e142cca61 100644 --- a/rules/containers-mounting-docker-socket/test/workloads/expected.json +++ b/rules/containers-mounting-docker-socket/test/workloads/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "volume: test-volume2 in Deployment: my-deployment has mounting to Docker internals.", + "deletePaths": [ + "spec.template.spec.volumes[1].hostPath.path" + ], "failedPaths": [ "spec.template.spec.volumes[1].hostPath.path" ], diff --git a/rules/csistoragecapacity-in-default-namespace/test/csistoragecapacity/expected.json b/rules/csistoragecapacity-in-default-namespace/test/csistoragecapacity/expected.json index 0c9059593..286e75534 100644 --- a/rules/csistoragecapacity-in-default-namespace/test/csistoragecapacity/expected.json +++ b/rules/csistoragecapacity-in-default-namespace/test/csistoragecapacity/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "CSIStorageCapacity: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/drop-capability-netraw/test/cronjob/expected.json b/rules/drop-capability-netraw/test/cronjob/expected.json index 9a0ee5d1a..b52f6557f 100644 --- a/rules/drop-capability-netraw/test/cronjob/expected.json +++ b/rules/drop-capability-netraw/test/cronjob/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Cronjob: hello does not drop the capability NET_RAW", + "deletePaths": [], "failedPaths": [], "fixPaths": [ { @@ -25,6 +26,7 @@ }, { "alertMessage": "Cronjob: hello does not drop the capability NET_RAW", + 
"deletePaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/drop-capability-netraw/test/pod/expected.json b/rules/drop-capability-netraw/test/pod/expected.json index 5e9b32afd..5f76fb9a9 100644 --- a/rules/drop-capability-netraw/test/pod/expected.json +++ b/rules/drop-capability-netraw/test/pod/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Pod: audit-pod does not drop the capability NET_RAW", + "deletePaths": [], "failedPaths": [], "fixPaths": [ { @@ -28,6 +29,7 @@ }, { "alertMessage": "Pod: audit-pod does not drop the capability NET_RAW", + "deletePaths": ["spec.containers[2].securityContext.capabilities.add"], "failedPaths": ["spec.containers[2].securityContext.capabilities.add"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/drop-capability-netraw/test/workloads/expected.json b/rules/drop-capability-netraw/test/workloads/expected.json index 0d6f3f776..969791ead 100644 --- a/rules/drop-capability-netraw/test/workloads/expected.json +++ b/rules/drop-capability-netraw/test/workloads/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Workload: my-deployment does not drop the capability NET_RAW", + "deletePaths": [], "failedPaths": [], "fixPaths": [ { @@ -28,6 +29,7 @@ }, { "alertMessage": "Workload: my-deployment does not drop the capability NET_RAW", + "deletePaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/encrypt-traffic-to-https-load-balancers-with-tls-certificates/test/failed_ingress_tls_not_set/expected.json b/rules/encrypt-traffic-to-https-load-balancers-with-tls-certificates/test/failed_ingress_tls_not_set/expected.json index d494ee7d6..9cf59bbd9 100644 --- a/rules/encrypt-traffic-to-https-load-balancers-with-tls-certificates/test/failed_ingress_tls_not_set/expected.json +++ b/rules/encrypt-traffic-to-https-load-balancers-with-tls-certificates/test/failed_ingress_tls_not_set/expected.json @@ -4,6 +4,7 @@ "alertMessage": "Ingress object has 'spec.tls' value not set.", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["spec.tls"], "failedPaths": ["spec.tls"], "fixPaths":[], "alertObject": { diff --git a/rules/endpoints-in-default-namespace/test/endpoints/expected.json b/rules/endpoints-in-default-namespace/test/endpoints/expected.json index e44bbe18f..8eedd1cb2 100644 --- a/rules/endpoints-in-default-namespace/test/endpoints/expected.json +++ b/rules/endpoints-in-default-namespace/test/endpoints/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Endpoints: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/endpointslice-in-default-namespace/test/endpointslice/expected.json b/rules/endpointslice-in-default-namespace/test/endpointslice/expected.json index 38fe3cd8d..cc5884a38 100644 --- a/rules/endpointslice-in-default-namespace/test/endpointslice/expected.json +++ b/rules/endpointslice-in-default-namespace/test/endpointslice/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "EndpointSlice: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/enforce-kubelet-client-tls-authentication-updated/raw.rego b/rules/enforce-kubelet-client-tls-authentication-updated/raw.rego index 5df9eea58..329995c8c 100644 --- a/rules/enforce-kubelet-client-tls-authentication-updated/raw.rego +++ b/rules/enforce-kubelet-client-tls-authentication-updated/raw.rego @@ -43,6 +43,7 @@ deny[msga] { msga := { "alertMessage": "kubelet client TLS authentication is not 
enabled", "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -69,6 +70,7 @@ deny[msga] { msga := { "alertMessage": "Failed to analyze config file", "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", diff --git a/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-argument-not-set/expected.json b/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-argument-not-set/expected.json index 682d16051..8f2838217 100644 --- a/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-argument-not-set/expected.json +++ b/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-argument-not-set/expected.json @@ -11,6 +11,7 @@ } }, "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-only/expected.json b/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-only/expected.json index 5863c59b7..cd3800c6b 100644 --- a/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-only/expected.json +++ b/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-only/expected.json @@ -16,6 +16,10 @@ } }, "alertScore": 6, + "reviewPaths": [ + "authentication.x509.clientCAFile" + ], + "failedPaths": [ "authentication.x509.clientCAFile" ], diff --git a/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-sensor-failed/expected.json b/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-sensor-failed/expected.json index c2ce5c960..223ea52b4 100644 --- a/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-sensor-failed/expected.json +++ b/rules/enforce-kubelet-client-tls-authentication-updated/test/fail-config-sensor-failed/expected.json @@ -12,6 +12,7 @@ } }, "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/ensure-default-service-accounts-has-only-default-roles/test/failed_clusterrolebinding/expected.json b/rules/ensure-default-service-accounts-has-only-default-roles/test/failed_clusterrolebinding/expected.json index d83d2a4c7..261928aa8 100644 --- a/rules/ensure-default-service-accounts-has-only-default-roles/test/failed_clusterrolebinding/expected.json +++ b/rules/ensure-default-service-accounts-has-only-default-roles/test/failed_clusterrolebinding/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "ClusterRoleBinding: read-secrets-global has for ServiceAccount 'default' rules bound to it that are not defaults", + "deletePaths": ["subjects[0]"], "failedPaths": ["subjects[0]"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/ensure-default-service-accounts-has-only-default-roles/test/failed_clusterrolebinding_boots_none_default/expected.json b/rules/ensure-default-service-accounts-has-only-default-roles/test/failed_clusterrolebinding_boots_none_default/expected.json index 88114f643..751e75ab5 100644 --- a/rules/ensure-default-service-accounts-has-only-default-roles/test/failed_clusterrolebinding_boots_none_default/expected.json +++ b/rules/ensure-default-service-accounts-has-only-default-roles/test/failed_clusterrolebinding_boots_none_default/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "ClusterRoleBinding: read-secrets-global has for ServiceAccount 'default' rules bound to it that are not defaults", + 
"deletePaths": ["subjects[0]"], "failedPaths": ["subjects[0]"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers/test/failed/expected.json b/rules/ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers/test/failed/expected.json index 46fd37e27..fe72ba3d7 100644 --- a/rules/ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers/test/failed/expected.json +++ b/rules/ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers/test/failed/expected.json @@ -1 +1,97 @@ -[{"alertMessage":"The API server is not configured to use strong cryptographic ciphers","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"The API server is not configured to use strong cryptographic ciphers","failedPaths":["spec.containers[0].command[1]"],"fixPaths":[{"path":"spec.containers[0].command[1]","value":"--tls-cipher-suites=Foo,TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"The API server is not configured to use strong cryptographic 
ciphers","failedPaths":["spec.containers[0].command[1]"],"fixPaths":[{"path":"spec.containers[0].command[1]","value":"--tls-cipher-suites=TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "The API server is not configured to use strong cryptographic ciphers", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "The API server is not configured to use strong cryptographic ciphers", + "reviewPaths": [ + "spec.containers[0].command[1]" + ], + "failedPaths": [ + "spec.containers[0].command[1]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[1]", + "value": "--tls-cipher-suites=Foo,TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + 
"alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "The API server is not configured to use strong cryptographic ciphers", + "reviewPaths": [ + "spec.containers[0].command[1]" + ], + "failedPaths": [ + "spec.containers[0].command[1]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[1]", + "value": "--tls-cipher-suites=TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set/test/failed/expected.json b/rules/ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set/test/failed/expected.json index 8ccf0c9e5..222e29df0 100644 --- a/rules/ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set/test/failed/expected.json +++ b/rules/ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set/test/failed/expected.json @@ -1 +1,63 @@ -[{"alertMessage":"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers","failedPaths":["spec.containers[0].command[5]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers","failedPaths":["spec.containers[0].command[5]"],"fixPaths":[{"path":"spec.containers[0].command[5]","value":"--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], + "failedPaths": [ + "spec.containers[0].command[5]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], + "failedPaths": [ + "spec.containers[0].command[5]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[5]", + "value": "--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set/test/failed/expected.json b/rules/ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set/test/failed/expected.json index b2430a68e..11a2442ac 100644 --- a/rules/ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set/test/failed/expected.json +++ b/rules/ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"Admission control policy is not set to AlwaysPullImages","failedPaths":["spec.containers[0].command[5]"],"fixPaths":[{"path":"spec.containers[0].command[5]","value":"--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,AlwaysPullImages"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"Admission control policy is not set to AlwaysPullImages","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--enable-admission-plugins=AlwaysPullImages"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "Admission control policy is not set to AlwaysPullImages", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], + "failedPaths": [ + "spec.containers[0].command[5]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[5]", + "value": "--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,AlwaysPullImages" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ 
+ { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "Admission control policy is not set to AlwaysPullImages", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--enable-admission-plugins=AlwaysPullImages" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-admission-control-plugin-EventRateLimit-is-set/test/failed/expected.json b/rules/ensure-that-the-admission-control-plugin-EventRateLimit-is-set/test/failed/expected.json index 9fd39ec40..2589a4b02 100644 --- a/rules/ensure-that-the-admission-control-plugin-EventRateLimit-is-set/test/failed/expected.json +++ b/rules/ensure-that-the-admission-control-plugin-EventRateLimit-is-set/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"The API server is not configured to limit the rate at which it accepts requests. This could lead to a denial of service attack","failedPaths":["spec.containers[0].command[5]"],"fixPaths":[{"path":"spec.containers[0].command[5]","value":"--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,EventRateLimit"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"The API server is not configured to limit the rate at which it accepts requests. This could lead to a denial of service attack","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--enable-admission-plugins=EventRateLimit"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "The API server is not configured to limit the rate at which it accepts requests. This could lead to a denial of service attack", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], + "failedPaths": [ + "spec.containers[0].command[5]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[5]", + "value": "--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,EventRateLimit" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--enable-admission-plugins=EventRateLimit" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set/test/failed/expected.json b/rules/ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set/test/failed/expected.json index 4dd96783f..3f87526fa 100644 --- a/rules/ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set/test/failed/expected.json +++ b/rules/ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set/test/failed/expected.json @@ -1 +1,63 @@ -[{"alertMessage":"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers","failedPaths":["spec.containers[0].command[6]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers","failedPaths":["spec.containers[0].command[6]"],"fixPaths":[{"path":"spec.containers[0].command[6]","value":"--disable-admission-plugins=ServiceAccount"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers", + "reviewPaths": [ + "spec.containers[0].command[6]" + ], + "failedPaths": [ + "spec.containers[0].command[6]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers", + "reviewPaths": [ + "spec.containers[0].command[6]" + ], + "failedPaths": [ + "spec.containers[0].command[6]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[6]", + "value": "--disable-admission-plugins=ServiceAccount" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-admission-control-plugin-NodeRestriction-is-set/test/failed/expected.json b/rules/ensure-that-the-admission-control-plugin-NodeRestriction-is-set/test/failed/expected.json index 99f0bce5e..1464cab1c 100644 --- a/rules/ensure-that-the-admission-control-plugin-NodeRestriction-is-set/test/failed/expected.json +++ b/rules/ensure-that-the-admission-control-plugin-NodeRestriction-is-set/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"NodeRestriction is not enabled","failedPaths":["spec.containers[0].command[5]"],"fixPaths":[{"path":"spec.containers[0].command[5]","value":"--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"NodeRestriction is not enabled","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--enable-admission-plugins=NodeRestriction"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "NodeRestriction is not enabled", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], + "failedPaths": [ + "spec.containers[0].command[5]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[5]", + "value": "--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "NodeRestriction is not enabled", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--enable-admission-plugins=NodeRestriction" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git 
a/rules/ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used/test/failed/expected.json b/rules/ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used/test/failed/expected.json index 2da08b9e4..3bc427d1a 100644 --- a/rules/ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used/test/failed/expected.json +++ b/rules/ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used/test/failed/expected.json @@ -1 +1,35 @@ -[{"alertMessage":"The SecurityContextDeny addmission controller is not enabled. This could allow for privilege escalation in the cluster","failedPaths":["spec.containers[0].command[5]"],"fixPaths":[{"path":"spec.containers[0].command[5]","value":"--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,SecurityContextDeny"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "The SecurityContextDeny addmission controller is not enabled. This could allow for privilege escalation in the cluster", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], + "failedPaths": [ + "spec.containers[0].command[5]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[5]", + "value": "--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,SecurityContextDeny" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-admission-control-plugin-ServiceAccount-is-set/test/failed/expected.json b/rules/ensure-that-the-admission-control-plugin-ServiceAccount-is-set/test/failed/expected.json index f1a159300..c6bd6c54b 100644 --- a/rules/ensure-that-the-admission-control-plugin-ServiceAccount-is-set/test/failed/expected.json +++ b/rules/ensure-that-the-admission-control-plugin-ServiceAccount-is-set/test/failed/expected.json @@ -1 +1,63 @@ -[{"alertMessage":"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers","failedPaths":["spec.containers[0].command[6]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers","failedPaths":["spec.containers[0].command[6]"],"fixPaths":[{"path":"spec.containers[0].command[6]","value":"--disable-admission-plugins=NamespaceLifecycle"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers", + "reviewPaths": [ + "spec.containers[0].command[6]" + ], + "failedPaths": [ + "spec.containers[0].command[6]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers", + "reviewPaths": [ + "spec.containers[0].command[6]" + ], + "failedPaths": [ + "spec.containers[0].command[6]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[6]", + "value": "--disable-admission-plugins=NamespaceLifecycle" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set/test/failed/expected.json b/rules/ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set/test/failed/expected.json index 08805ee09..68a6bfa1a 100644 --- a/rules/ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set/test/failed/expected.json @@ -1 +1,63 @@ -[{"alertMessage":"admission control plugin DenyServiceExternalIPs is enabled. This is equal to turning off all admission controllers","failedPaths":["spec.containers[0].command[5]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"admission control plugin DenyServiceExternalIPs is enabled. This is equal to turning off all admission controllers","failedPaths":["spec.containers[0].command[5]"],"fixPaths":[{"path":"spec.containers[0].command[5]","value":"--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "admission control plugin DenyServiceExternalIPs is enabled. 
This is equal to turning off all admission controllers", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], + "failedPaths": [ + "spec.containers[0].command[5]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "admission control plugin DenyServiceExternalIPs is enabled. This is equal to turning off all admission controllers", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], + "failedPaths": [ + "spec.containers[0].command[5]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[5]", + "value": "--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false/test/failed/expected.json b/rules/ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false/test/failed/expected.json index 6c9bf3c06..9858e4961 100644 --- a/rules/ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false/test/failed/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "anonymous requests is enabled", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { @@ -29,6 +30,9 @@ }, { "alertMessage": "anonymous requests is enabled", + "reviewPaths": [ + "spec.containers[0].command[26]" + ], "failedPaths": [ "spec.containers[0].command[26]" ], diff --git a/rules/ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate/test/failed/expected.json index fe6fb2a79..8a82186d6 100644 --- a/rules/ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"Audit log retention period is 29 days, which is too small (should be at least 30 days)","failedPaths":["spec.containers[0].command[2]"],"fixPaths":[{"path":"spec.containers[0].command[2]","value":"--audit-log-maxage=30"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"Audit log retention period is not 
set","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--audit-log-maxage=30"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "Audit log retention period is 29 days, which is too small (should be at least 30 days)", + "reviewPaths": [ + "spec.containers[0].command[2]" + ], + "failedPaths": [ + "spec.containers[0].command[2]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[2]", + "value": "--audit-log-maxage=30" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "Audit log retention period is not set", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--audit-log-maxage=30" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate/test/failed/expected.json index 4eae1f730..e63b2768d 100644 --- a/rules/ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate/test/failed/expected.json @@ -1 +1,63 @@ -[{"alertMessage":"Audit log max backup is not set","failedPaths":["spec.containers[0].command[26]"],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--audit-log-maxbackup=YOUR_VALUE"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"Please validate that the audit log max backup is set to an appropriate value","failedPaths":["spec.containers[0].command[27]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "Audit log max backup is not set", + "reviewPaths": [ + "spec.containers[0].command[26]" + ], + "failedPaths": [ + "spec.containers[0].command[26]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--audit-log-maxbackup=YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": 
"kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "Please validate that the audit log max backup is set to an appropriate value", + "reviewPaths": [ + "spec.containers[0].command[27]" + ], + "failedPaths": [ + "spec.containers[0].command[27]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate/test/failed/expected.json index adf8d606d..0e6b91b5a 100644 --- a/rules/ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate/test/failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Please validate that audit-log-maxsize has an appropriate value", + "reviewPaths": [ + "spec.containers[0].command[26]" + ], "failedPaths": [ "spec.containers[0].command[26]" ], @@ -26,6 +29,9 @@ }, { "alertMessage": "Audit log max size not set", + "reviewPaths": [ + "spec.containers[0].command[27]" + ], "failedPaths": [ "spec.containers[0].command[27]" ], diff --git a/rules/ensure-that-the-api-server-audit-log-path-argument-is-set/test/failed/expected.json b/rules/ensure-that-the-api-server-audit-log-path-argument-is-set/test/failed/expected.json index 51ee20640..965d3f5f8 100644 --- a/rules/ensure-that-the-api-server-audit-log-path-argument-is-set/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-audit-log-path-argument-is-set/test/failed/expected.json @@ -1 +1,31 @@ -[{"alertMessage":"kubernetes API Server is not audited","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--audit-log-path=/var/log/apiserver/audit.log"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "kubernetes API Server is not audited", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--audit-log-path=/var/log/apiserver/audit.log" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-authorization-mode-argument-includes-Node/test/failed/expected.json b/rules/ensure-that-the-api-server-authorization-mode-argument-includes-Node/test/failed/expected.json index 096c4d3d8..3c8ee3a85 100644 --- a/rules/ensure-that-the-api-server-authorization-mode-argument-includes-Node/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-authorization-mode-argument-includes-Node/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"kubelet nodes can read objects that are 
not associated with them","failedPaths":["spec.containers[0].command[3]"],"fixPaths":[{"path":"spec.containers[0].command[3]","value":"--authorization-mode=RBAC,Node"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"kubelet nodes can read objects that are not associated with them","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--authorization-mode=Node"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "kubelet nodes can read objects that are not associated with them", + "reviewPaths": [ + "spec.containers[0].command[3]" + ], + "failedPaths": [ + "spec.containers[0].command[3]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[3]", + "value": "--authorization-mode=RBAC,Node" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "kubelet nodes can read objects that are not associated with them", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--authorization-mode=Node" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-authorization-mode-argument-includes-RBAC/test/failed/expected.json b/rules/ensure-that-the-api-server-authorization-mode-argument-includes-RBAC/test/failed/expected.json index 0521132de..e0fdb3898 100644 --- a/rules/ensure-that-the-api-server-authorization-mode-argument-includes-RBAC/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-authorization-mode-argument-includes-RBAC/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"RBAC is not enabled","failedPaths":["spec.containers[0].command[3]"],"fixPaths":[{"path":"spec.containers[0].command[3]","value":"--authorization-mode=Node,RBAC"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"RBAC is not enabled","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--authorization-mode=RBAC"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "RBAC is not enabled", + "reviewPaths": [ + "spec.containers[0].command[3]" + ], + "failedPaths": [ + "spec.containers[0].command[3]" + 
], + "fixPaths": [ + { + "path": "spec.containers[0].command[3]", + "value": "--authorization-mode=Node,RBAC" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "RBAC is not enabled", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--authorization-mode=RBAC" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow/test/failed/expected.json b/rules/ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow/test/failed/expected.json index fbf3223dc..627662511 100644 --- a/rules/ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow/test/failed/expected.json @@ -1 +1,68 @@ -[{"alertMessage":"AlwaysAllow authorization mode is enabled","failedPaths":["spec.containers[0].command[3]"],"fixPaths":[{"path":"spec.containers[0].command[3]","value":"--authorization-mode=RBAC"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"AlwaysAllow authorization mode is enabled","failedPaths":["spec.containers[0].command[3]"],"fixPaths":[{"path":"spec.containers[0].command[3]","value":"--authorization-mode=Node"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "AlwaysAllow authorization mode is enabled", + "reviewPaths": [ + "spec.containers[0].command[3]" + ], + "failedPaths": [ + "spec.containers[0].command[3]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[3]", + "value": "--authorization-mode=RBAC" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "AlwaysAllow authorization mode is enabled", + "reviewPaths": [ + "spec.containers[0].command[3]" + ], + "failedPaths": [ + "spec.containers[0].command[3]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[3]", + "value": "--authorization-mode=Node" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": 
"kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate/test/failed/expected.json index 2429e7128..3e5bc2d61 100644 --- a/rules/ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate/test/failed/expected.json @@ -1 +1,31 @@ -[{"alertMessage":"API server communication is not encrypted properly","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[25]","value":"--client-ca-file=\u003cpath/to/client-ca.crt\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "API server communication is not encrypted properly", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[25]", + "value": "--client-ca-file=\u003cpath/to/client-ca.crt\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate/test/failed/expected.json index 5b1ff99de..0e43d8a50 100644 --- a/rules/ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate/test/failed/expected.json @@ -1 +1,31 @@ -[{"alertMessage":"API server is not configured to use SSL Certificate Authority file for etcd","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[25]","value":"--etcd-cafile=\u003cpath/to/ca-file.crt\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "API server is not configured to use SSL Certificate Authority file for etcd", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[25]", + "value": "--etcd-cafile=\u003cpath/to/ca-file.crt\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate/test/failed/expected.json index aba7bb300..143e0323c 100644 --- 
a/rules/ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate/test/failed/expected.json @@ -1 +1,93 @@ -[{"alertMessage":"etcd is not configured to use TLS properly","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[27]","value":"--etcd-keyfile=\u003cpath/to/client-key-file.key\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"etcd is not configured to use TLS properly","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[25]","value":"--etcd-certfile=\u003cpath/to/client-certificate-file.crt\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"etcd is not configured to use TLS properly","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[25]","value":"--etcd-certfile=\u003cpath/to/client-certificate-file.crt\u003e"},{"path":"spec.containers[0].command[26]","value":"--etcd-keyfile=\u003cpath/to/client-key-file.key\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "etcd is not configured to use TLS properly", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[27]", + "value": "--etcd-keyfile=\u003cpath/to/client-key-file.key\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "etcd is not configured to use TLS properly", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[25]", + "value": "--etcd-certfile=\u003cpath/to/client-certificate-file.crt\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "etcd is not configured to use TLS properly", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[25]", + "value": "--etcd-certfile=\u003cpath/to/client-certificate-file.crt\u003e" + }, + { + "path": "spec.containers[0].command[26]", + "value": "--etcd-keyfile=\u003cpath/to/client-key-file.key\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ 
No newline at end of file diff --git a/rules/ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate/test/failed/expected.json index 53a17a968..ecfa754ca 100644 --- a/rules/ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate/test/failed/expected.json @@ -1 +1,31 @@ -[{"alertMessage":"TLS certificate authority file is not specified","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--kubelet-certificate-authority=\u003cpath/to/ca.crt\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "TLS certificate authority file is not specified", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--kubelet-certificate-authority=\u003cpath/to/ca.crt\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate/test/failed/expected.json index fe38e8941..89f5a0bec 100644 --- a/rules/ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate/test/failed/expected.json @@ -1 +1,93 @@ -[{"alertMessage":"certificate based kubelet authentication is not enabled","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[29]","value":"--kubelet-client-key=\u003cpath/to/appropriate/file\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"certificate based kubelet authentication is not enabled","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[28]","value":"--kubelet-client-certificate=\u003cpath/to/appropriate/file\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"certificate based kubelet authentication is not 
enabled","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--kubelet-client-certificate=\u003cpath/to/appropriate/file\u003e"},{"path":"spec.containers[0].command[27]","value":"--kubelet-client-key=\u003cpath/to/appropriate/file\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "certificate based kubelet authentication is not enabled", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[29]", + "value": "--kubelet-client-key=\u003cpath/to/appropriate/file\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "certificate based kubelet authentication is not enabled", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[28]", + "value": "--kubelet-client-certificate=\u003cpath/to/appropriate/file\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "certificate based kubelet authentication is not enabled", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--kubelet-client-certificate=\u003cpath/to/appropriate/file\u003e" + }, + { + "path": "spec.containers[0].command[27]", + "value": "--kubelet-client-key=\u003cpath/to/appropriate/file\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-profiling-argument-is-set-to-false/test/failed/expected.json b/rules/ensure-that-the-api-server-profiling-argument-is-set-to-false/test/failed/expected.json index 1831fa39c..7f1ef7088 100644 --- a/rules/ensure-that-the-api-server-profiling-argument-is-set-to-false/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-profiling-argument-is-set-to-false/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"profiling is enabled. This could potentially be exploited to uncover system and program details.","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[26]","value":"--profiling=false"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"profiling is enabled. 
This could potentially be exploited to uncover system and program details.","failedPaths":["spec.containers[0].command[3]"],"fixPaths":[{"path":"spec.containers[0].command[3]","value":"--profiling=false"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "profiling is enabled. This could potentially be exploited to uncover system and program details.", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[26]", + "value": "--profiling=false" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "profiling is enabled. This could potentially be exploited to uncover system and program details.", + "reviewPaths": [ + "spec.containers[0].command[3]" + ], + "failedPaths": [ + "spec.containers[0].command[3]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[3]", + "value": "--profiling=false" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate/test/failed/expected.json index 760d4376e..b40aab351 100644 --- a/rules/ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate/test/failed/expected.json @@ -1 +1,30 @@ -[{"alertMessage":"Please validate the request timeout flag is set to an appropriate value","failedPaths":["spec.containers[0].command[1]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "Please validate the request timeout flag is set to an appropriate value", + "reviewPaths": [ + "spec.containers[0].command[1]" + ], + "failedPaths": [ + "spec.containers[0].command[1]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-secure-port-argument-is-not-set-to-0/test/failed/expected.json b/rules/ensure-that-the-api-server-secure-port-argument-is-not-set-to-0/test/failed/expected.json index d086fbdaf..ac540417c 100644 --- 
a/rules/ensure-that-the-api-server-secure-port-argument-is-not-set-to-0/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-secure-port-argument-is-not-set-to-0/test/failed/expected.json @@ -1 +1,30 @@ -[{"alertMessage":"the secure port is disabled","failedPaths":["spec.containers[0].command[1]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "the secure port is disabled", + "reviewPaths": [ + "spec.containers[0].command[1]" + ], + "failedPaths": [ + "spec.containers[0].command[1]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true/test/failed/expected.json b/rules/ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true/test/failed/expected.json index 161dbf5fc..c337e46b7 100644 --- a/rules/ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true/test/failed/expected.json @@ -1 +1,63 @@ -[{"alertMessage":"anonymous requests is enabled","failedPaths":["spec.containers[0].command[2]"],"fixPaths":[{"path":"spec.containers[0].command[2]","value":"--allow-privileged=true --service-account-lookup=true"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"anonymous requests is enabled","failedPaths":["spec.containers[0].command[2]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "anonymous requests is enabled", + "reviewPaths": [ + "spec.containers[0].command[2]" + ], + "failedPaths": [ + "spec.containers[0].command[2]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[2]", + "value": "--allow-privileged=true --service-account-lookup=true" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "anonymous requests is enabled", + "reviewPaths": [ + "spec.containers[0].command[2]" + ], + "failedPaths": [ + "spec.containers[0].command[2]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + 
] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate/test/failed/expected.json index 5db9be818..5d746c322 100644 --- a/rules/ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate/test/failed/expected.json @@ -1 +1,93 @@ -[{"alertMessage":"API server is not configured to serve only HTTPS traffic","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[25]","value":"--tls-cert-file=\u003cpath/to/tls-certificate-file.crt\u003e"},{"path":"spec.containers[0].command[26]","value":"--tls-private-key-file=\u003cpath/to/tls-key-file.key\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"API server is not configured to serve only HTTPS traffic","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[27]","value":"--tls-private-key-file=\u003cpath/to/tls-key-file.key\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"API server is not configured to serve only HTTPS traffic","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[25]","value":"--tls-cert-file=\u003cpath/to/tls-certificate-file.crt\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "API server is not configured to serve only HTTPS traffic", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[25]", + "value": "--tls-cert-file=\u003cpath/to/tls-certificate-file.crt\u003e" + }, + { + "path": "spec.containers[0].command[26]", + "value": "--tls-private-key-file=\u003cpath/to/tls-key-file.key\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "API server is not configured to serve only HTTPS traffic", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[27]", + "value": "--tls-private-key-file=\u003cpath/to/tls-key-file.key\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "API server is not configured to serve only HTTPS traffic", + "reviewPaths": [], + "failedPaths": 
[], + "fixPaths": [ + { + "path": "spec.containers[0].command[25]", + "value": "--tls-cert-file=\u003cpath/to/tls-certificate-file.crt\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-api-server-token-auth-file-parameter-is-not-set/test/failed/expected.json b/rules/ensure-that-the-api-server-token-auth-file-parameter-is-not-set/test/failed/expected.json index 0440e2eb2..445a23325 100644 --- a/rules/ensure-that-the-api-server-token-auth-file-parameter-is-not-set/test/failed/expected.json +++ b/rules/ensure-that-the-api-server-token-auth-file-parameter-is-not-set/test/failed/expected.json @@ -1 +1,63 @@ -[{"alertMessage":"API server TLS is not configured","failedPaths":["spec.containers[0].command[26]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}},{"alertMessage":"API server TLS is not configured","failedPaths":["spec.containers[0].command[25]"],"fixPaths":[{"path":"spec.containers[0].command[25]","value":"--tls-private-key-file=/var/lib/minikube/certs/apiserver.key"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-apiserver","tier":"control-plane"},"name":"kube-apiserver-minikube"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "API server TLS is not configured", + "reviewPaths": [ + "spec.containers[0].command[26]" + ], + "failedPaths": [ + "spec.containers[0].command[26]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + }, + { + "alertMessage": "API server TLS is not configured", + "reviewPaths": [ + "spec.containers[0].command[25]" + ], + "failedPaths": [ + "spec.containers[0].command[25]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[25]", + "value": "--tls-private-key-file=/var/lib/minikube/certs/apiserver.key" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-apiserver", + "tier": "control-plane" + }, + "name": "kube-apiserver-minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true/test/failed/expected.json b/rules/ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true/test/failed/expected.json index 1b9330992..f691b0543 100644 --- a/rules/ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true/test/failed/expected.json +++ b/rules/ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true/test/failed/expected.json @@ -1 +1,35 @@ 
-[{"alertMessage":"`RotateKubeletServerCertificate` is set to false on the controller manager","failedPaths":["spec.containers[0].command[4]"],"fixPaths":[{"path":"spec.containers[0].command[4]","value":"--feature-gates=RotateKubeletServerCertificate=true"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "`RotateKubeletServerCertificate` is set to false on the controller manager", + "reviewPaths": [ + "spec.containers[0].command[4]" + ], + "failedPaths": [ + "spec.containers[0].command[4]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[4]", + "value": "--feature-gates=RotateKubeletServerCertificate=true" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1/test/failed/expected.json b/rules/ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1/test/failed/expected.json index ab7654159..ad619bdf5 100644 --- a/rules/ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1/test/failed/expected.json +++ b/rules/ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"the Controller Manager API service is not bound to a localhost interface only","failedPaths":["spec.containers[0].command[4]"],"fixPaths":[{"path":"spec.containers[0].command[4]","value":"--bind-address=127.0.0.1"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}},{"alertMessage":"the Controller Manager API service is not bound to a localhost interface only","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[17]","value":"--bind-address=127.0.0.1"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "the Controller Manager API service is not bound to a localhost interface only", + "reviewPaths": [ + "spec.containers[0].command[4]" + ], + "failedPaths": [ + "spec.containers[0].command[4]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[4]", + "value": "--bind-address=127.0.0.1" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + }, + { + "alertMessage": "the Controller Manager API service is not bound to a localhost interface only", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": 
"spec.containers[0].command[17]", + "value": "--bind-address=127.0.0.1" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-controller-manager-profiling-argument-is-set-to-false/test/failed/expected.json b/rules/ensure-that-the-controller-manager-profiling-argument-is-set-to-false/test/failed/expected.json index 1d4b8229a..034d65c3b 100644 --- a/rules/ensure-that-the-controller-manager-profiling-argument-is-set-to-false/test/failed/expected.json +++ b/rules/ensure-that-the-controller-manager-profiling-argument-is-set-to-false/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"profiling is enabled for the kube-controller-manager","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[17]","value":"--profiling=false"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}},{"alertMessage":"profiling is enabled for the kube-controller-manager","failedPaths":["spec.containers[0].command[1]"],"fixPaths":[{"path":"spec.containers[0].command[1]","value":"--profiling=false"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "profiling is enabled for the kube-controller-manager", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[17]", + "value": "--profiling=false" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + }, + { + "alertMessage": "profiling is enabled for the kube-controller-manager", + "reviewPaths": [ + "spec.containers[0].command[1]" + ], + "failedPaths": [ + "spec.containers[0].command[1]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[1]", + "value": "--profiling=false" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate/test/failed/expected.json index 3ca1a46e2..aabf0c682 100644 --- a/rules/ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate/test/failed/expected.json @@ -1 +1,31 @@ -[{"alertMessage":"the controller 
manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[17]","value":"--root-ca-file=\u003cpath/to/key/ca.crt\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[17]", + "value": "--root-ca-file=\u003cpath/to/key/ca.crt\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate/test/failed/expected.json index 883baa4b5..f2447981d 100644 --- a/rules/ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate/test/failed/expected.json @@ -1 +1,31 @@ -[{"alertMessage":"service account token can not be rotated as needed","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[17]","value":"--service-account-private-key-file=\u003cpath/to/key/filename.key\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "service account token can not be rotated as needed", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[17]", + "value": "--service-account-private-key-file=\u003cpath/to/key/filename.key\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate/test/failed/expected.json b/rules/ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate/test/failed/expected.json index 968e35692..387f9bbb8 100644 --- a/rules/ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate/test/failed/expected.json +++ b/rules/ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate/test/failed/expected.json @@ -1 +1,63 @@ 
-[{"alertMessage":"--terminated-pod-gc-threshold flag not set to an appropriate value","failedPaths":["spec.containers[0].command[18]"],"fixPaths":[{"path":"spec.containers[0].command[18]","value":"--terminated-pod-gc-threshold=YOUR_VALUE"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}},{"alertMessage":"Please validate that --terminated-pod-gc-threshold is set to an appropriate value","failedPaths":["spec.containers[0].command[18]"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "--terminated-pod-gc-threshold flag not set to an appropriate value", + "reviewPaths": [ + "spec.containers[0].command[18]" + ], + "failedPaths": [ + "spec.containers[0].command[18]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[18]", + "value": "--terminated-pod-gc-threshold=YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + }, + { + "alertMessage": "Please validate that --terminated-pod-gc-threshold is set to an appropriate value", + "reviewPaths": [ + "spec.containers[0].command[18]" + ], + "failedPaths": [ + "spec.containers[0].command[18]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true/test/failed/expected.json b/rules/ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true/test/failed/expected.json index 005fd6b3d..784fd292e 100644 --- a/rules/ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true/test/failed/expected.json +++ b/rules/ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"--use-service-account-credentials is set to false in the controller manager","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[17]","value":"--use-service-account-credentials=true"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}},{"alertMessage":"--use-service-account-credentials is set to false in the controller 
manager","failedPaths":["spec.containers[0].command[17]"],"fixPaths":[{"path":"spec.containers[0].command[17]","value":"--use-service-account-credentials=true"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-controller-manager","tier":"control-plane"},"name":"kube-controller-manager"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "--use-service-account-credentials is set to false in the controller manager", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[17]", + "value": "--use-service-account-credentials=true" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + }, + { + "alertMessage": "--use-service-account-credentials is set to false in the controller manager", + "reviewPaths": [ + "spec.containers[0].command[17]" + ], + "failedPaths": [ + "spec.containers[0].command[17]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[17]", + "value": "--use-service-account-credentials=true" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + }, + "name": "kube-controller-manager" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1/test/failed/expected.json b/rules/ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1/test/failed/expected.json index f7b4aa588..9d63baf84 100644 --- a/rules/ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1/test/failed/expected.json +++ b/rules/ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"the kube scheduler is not bound to a localhost interface only","failedPaths":["spec.containers[0].command[3]"],"fixPaths":[{"path":"spec.containers[0].command[3]","value":"--bind-address=127.0.0.1"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-scheduler","tier":"control-plane"},"name":"kube-scheduler"}}]}},{"alertMessage":"the kube scheduler is not bound to a localhost interface only","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[5]","value":"--bind-address=127.0.0.1"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-scheduler","tier":"control-plane"},"name":"kube-scheduler"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "the kube scheduler is not bound to a localhost interface only", + "reviewPaths": [ + "spec.containers[0].command[3]" + ], + "failedPaths": [ + "spec.containers[0].command[3]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[3]", + "value": "--bind-address=127.0.0.1" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", 
+ "metadata": { + "labels": { + "component": "kube-scheduler", + "tier": "control-plane" + }, + "name": "kube-scheduler" + } + } + ] + } + }, + { + "alertMessage": "the kube scheduler is not bound to a localhost interface only", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[5]", + "value": "--bind-address=127.0.0.1" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-scheduler", + "tier": "control-plane" + }, + "name": "kube-scheduler" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ensure-that-the-scheduler-profiling-argument-is-set-to-false/test/failed/expected.json b/rules/ensure-that-the-scheduler-profiling-argument-is-set-to-false/test/failed/expected.json index 5e161b4e7..48d2b0964 100644 --- a/rules/ensure-that-the-scheduler-profiling-argument-is-set-to-false/test/failed/expected.json +++ b/rules/ensure-that-the-scheduler-profiling-argument-is-set-to-false/test/failed/expected.json @@ -1 +1,64 @@ -[{"alertMessage":"profiling is enabled for the kube-scheduler","failedPaths":[],"fixPaths":[{"path":"spec.containers[0].command[6]","value":"--profiling=false"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-scheduler","tier":"control-plane"},"name":"kube-scheduler"}}]}},{"alertMessage":"profiling is enabled for the kube-scheduler","failedPaths":["spec.containers[0].command[1]"],"fixPaths":[{"path":"spec.containers[0].command[1]","value":"--profiling=false"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"Pod","metadata":{"labels":{"component":"kube-scheduler","tier":"control-plane"},"name":"kube-scheduler"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "profiling is enabled for the kube-scheduler", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[0].command[6]", + "value": "--profiling=false" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-scheduler", + "tier": "control-plane" + }, + "name": "kube-scheduler" + } + } + ] + } + }, + { + "alertMessage": "profiling is enabled for the kube-scheduler", + "reviewPaths": [ + "spec.containers[0].command[1]" + ], + "failedPaths": [ + "spec.containers[0].command[1]" + ], + "fixPaths": [ + { + "path": "spec.containers[0].command[1]", + "value": "--profiling=false" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "component": "kube-scheduler", + "tier": "control-plane" + }, + "name": "kube-scheduler" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/etcd-auto-tls-disabled/test/fail-argument-set-to-true/expected.json b/rules/etcd-auto-tls-disabled/test/fail-argument-set-to-true/expected.json index fe82b8f78..1fd9d4b44 100644 --- a/rules/etcd-auto-tls-disabled/test/fail-argument-set-to-true/expected.json +++ b/rules/etcd-auto-tls-disabled/test/fail-argument-set-to-true/expected.json @@ -1,7 +1,9 @@ [ { "alertMessage": "Auto tls is enabled. 
Clients are able to use self-signed certificates for TLS.", - "failedPaths": [ + "reviewPaths": [ + "spec.containers[0].command[1]" + ], "failedPaths": [ "spec.containers[0].command[1]" ], "fixPaths": [ diff --git a/rules/etcd-client-auth-cert/test/fail-argument-set-to-false/expected.json b/rules/etcd-client-auth-cert/test/fail-argument-set-to-false/expected.json index e432e6468..e1fc5f631 100644 --- a/rules/etcd-client-auth-cert/test/fail-argument-set-to-false/expected.json +++ b/rules/etcd-client-auth-cert/test/fail-argument-set-to-false/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Etcd server is not requiring a valid client certificate", + "reviewPaths": [ + "spec.containers[0].command[2]" + ], "failedPaths": [ "spec.containers[0].command[2]" ], diff --git a/rules/etcd-client-auth-cert/test/fail-missing-argument/expected.json b/rules/etcd-client-auth-cert/test/fail-missing-argument/expected.json index ef179075b..31b38f3c1 100644 --- a/rules/etcd-client-auth-cert/test/fail-missing-argument/expected.json +++ b/rules/etcd-client-auth-cert/test/fail-missing-argument/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Etcd server is not requiring a valid client certificate", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/etcd-peer-auto-tls-disabled/test/fail-argument-set-to-true/expected.json b/rules/etcd-peer-auto-tls-disabled/test/fail-argument-set-to-true/expected.json index 80b6737a8..14af8cab7 100644 --- a/rules/etcd-peer-auto-tls-disabled/test/fail-argument-set-to-true/expected.json +++ b/rules/etcd-peer-auto-tls-disabled/test/fail-argument-set-to-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Peer auto tls is enabled. Peer clients are able to use self-signed certificates for TLS.", + "reviewPaths": [ + "spec.containers[0].command[1]" + ], "failedPaths": [ "spec.containers[0].command[1]" ], diff --git a/rules/etcd-peer-client-auth-cert/test/fail-argument-set-false/expected.json b/rules/etcd-peer-client-auth-cert/test/fail-argument-set-false/expected.json index 4016b51a0..cda71b6e3 100644 --- a/rules/etcd-peer-client-auth-cert/test/fail-argument-set-false/expected.json +++ b/rules/etcd-peer-client-auth-cert/test/fail-argument-set-false/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Etcd server is not requiring a valid client certificate.", + "reviewPaths": [ + "spec.containers[0].command[11]" + ], "failedPaths": [ "spec.containers[0].command[11]" ], diff --git a/rules/etcd-peer-client-auth-cert/test/fail-missing-argument/expected.json b/rules/etcd-peer-client-auth-cert/test/fail-missing-argument/expected.json index 87fec1880..9abd7a316 100644 --- a/rules/etcd-peer-client-auth-cert/test/fail-missing-argument/expected.json +++ b/rules/etcd-peer-client-auth-cert/test/fail-missing-argument/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Etcd server is not requiring a valid client certificate.", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/etcd-peer-tls-enabled/test/fail-missing-cert-argument/expected.json b/rules/etcd-peer-tls-enabled/test/fail-missing-cert-argument/expected.json index b621b49b3..e5d60f27b 100644 --- a/rules/etcd-peer-tls-enabled/test/fail-missing-cert-argument/expected.json +++ b/rules/etcd-peer-tls-enabled/test/fail-missing-cert-argument/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Etcd encryption for peer connection is not enabled.", + "reviewPaths": [ + "spec.containers[0].command" + ], "failedPaths": [ "spec.containers[0].command" ], diff --git 
a/rules/etcd-peer-tls-enabled/test/fail-missing-key-argument/expected.json b/rules/etcd-peer-tls-enabled/test/fail-missing-key-argument/expected.json index da45b3baa..1d9600fb2 100644 --- a/rules/etcd-peer-tls-enabled/test/fail-missing-key-argument/expected.json +++ b/rules/etcd-peer-tls-enabled/test/fail-missing-key-argument/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Etcd encryption for peer connection is not enabled.", + "reviewPaths": [ + "spec.containers[0].command" + ], "failedPaths": [ "spec.containers[0].command" ], diff --git a/rules/etcd-tls-enabled/test/fail-missing-cert-argument/expected.json b/rules/etcd-tls-enabled/test/fail-missing-cert-argument/expected.json index 26c0d378f..1fd4a7a5b 100644 --- a/rules/etcd-tls-enabled/test/fail-missing-cert-argument/expected.json +++ b/rules/etcd-tls-enabled/test/fail-missing-cert-argument/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "etcd encryption is not enabled", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/etcd-tls-enabled/test/fail-missing-key-argument/expected.json b/rules/etcd-tls-enabled/test/fail-missing-key-argument/expected.json index d29cb6eab..2381198cd 100644 --- a/rules/etcd-tls-enabled/test/fail-missing-key-argument/expected.json +++ b/rules/etcd-tls-enabled/test/fail-missing-key-argument/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "etcd encryption is not enabled", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/etcd-unique-ca/test/fail-same-key-file/expected.json b/rules/etcd-unique-ca/test/fail-same-key-file/expected.json index af2042681..74bbd2202 100644 --- a/rules/etcd-unique-ca/test/fail-same-key-file/expected.json +++ b/rules/etcd-unique-ca/test/fail-same-key-file/expected.json @@ -29,6 +29,10 @@ ] }, "alertScore": 8, + "reviewPaths": [ + "spec.containers[0].command[15]", + "spec.containers[0].command[4]" + ], "failedPaths": [ "spec.containers[0].command[15]", "spec.containers[0].command[4]" diff --git a/rules/excessive_amount_of_vulnerabilities_pods/test/test-failed/expected.json b/rules/excessive_amount_of_vulnerabilities_pods/test/test-failed/expected.json index 8af711374..470a8c026 100644 --- a/rules/excessive_amount_of_vulnerabilities_pods/test/test-failed/expected.json +++ b/rules/excessive_amount_of_vulnerabilities_pods/test/test-failed/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "pod 'nginx' exposed with critical vulnerabilities", + "reviewPaths": ["status.containerStatuses[0].imageID"], "failedPaths": ["status.containerStatuses[0].imageID"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/exec-into-container-v1/test/clusterrole/expected.json b/rules/exec-into-container-v1/test/clusterrole/expected.json index 52b641cb4..e7d964550 100644 --- a/rules/exec-into-container-v1/test/clusterrole/expected.json +++ b/rules/exec-into-container-v1/test/clusterrole/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-dave can exec into containers", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/exec-into-container-v1/test/role/expected.json b/rules/exec-into-container-v1/test/role/expected.json index 
cdffa41ef..61d2e3c75 100644 --- a/rules/exec-into-container-v1/test/role/expected.json +++ b/rules/exec-into-container-v1/test/role/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can exec into containers", + "reviewPaths": ["relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].verbs[1]", "relatedObjects[1].rules[0].verbs[3]", "relatedObjects[1].rules[0].apiGroups[1]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].verbs[1]", "relatedObjects[1].rules[0].verbs[3]", "relatedObjects[1].rules[0].apiGroups[1]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/exposed-rce-pods/test/test-failed/expected.json b/rules/exposed-rce-pods/test/test-failed/expected.json index 5d7ee1112..d6b108076 100644 --- a/rules/exposed-rce-pods/test/test-failed/expected.json +++ b/rules/exposed-rce-pods/test/test-failed/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "pod 'nginx' exposed with rce vulnerability", + "reviewPaths": ["status.containerStatuses[0].imageID"], "failedPaths": ["status.containerStatuses[0].imageID"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/exposed-sensitive-interfaces-v1/test/pod/expected.json b/rules/exposed-sensitive-interfaces-v1/test/pod/expected.json index 0f4ee56ca..94e22a34c 100644 --- a/rules/exposed-sensitive-interfaces-v1/test/pod/expected.json +++ b/rules/exposed-sensitive-interfaces-v1/test/pod/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "service: my-service is exposed", + "reviewPaths": ["spec.selector.matchLabels", "spec.selector"], "failedPaths": ["spec.selector.matchLabels", "spec.selector"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/exposed-sensitive-interfaces-v1/test/workloads/expected.json b/rules/exposed-sensitive-interfaces-v1/test/workloads/expected.json index 8f68ec332..24da81f54 100644 --- a/rules/exposed-sensitive-interfaces-v1/test/workloads/expected.json +++ b/rules/exposed-sensitive-interfaces-v1/test/workloads/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "service: my-service is exposed", + "reviewPaths": ["spec.selector.matchLabels", "spec.selector"], "failedPaths": ["spec.selector.matchLabels", "spec.selector"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/exposed-sensitive-interfaces-v1/test/workloads2/expected.json b/rules/exposed-sensitive-interfaces-v1/test/workloads2/expected.json index 0c1bc3f4a..feb2448e5 100644 --- a/rules/exposed-sensitive-interfaces-v1/test/workloads2/expected.json +++ b/rules/exposed-sensitive-interfaces-v1/test/workloads2/expected.json @@ -1,6 +1,10 @@ [ { "alertMessage": "service: jenkins-service is exposed", + "reviewPaths": [ + "spec.selector.matchLabels", + "spec.selector" + ], "failedPaths": [ "spec.selector.matchLabels", "spec.selector" diff --git a/rules/exposure-to-internet/test/failed_with_ingress/expected.json b/rules/exposure-to-internet/test/failed_with_ingress/expected.json index 5ce02f86b..8a79e4654 100644 --- a/rules/exposure-to-internet/test/failed_with_ingress/expected.json +++ b/rules/exposure-to-internet/test/failed_with_ingress/expected.json @@ -51,6 +51,9 @@ ] } }, + "reviewPaths": [ + "spec.rules[0].http.paths[0].backend.service.name" + ], "failedPaths": [ "spec.rules[0].http.paths[0].backend.service.name" ], diff --git a/rules/exposure-to-internet/test/failed_with_service_loadbalancer/expected.json 
b/rules/exposure-to-internet/test/failed_with_service_loadbalancer/expected.json index 797e9c436..1a30f02bf 100644 --- a/rules/exposure-to-internet/test/failed_with_service_loadbalancer/expected.json +++ b/rules/exposure-to-internet/test/failed_with_service_loadbalancer/expected.json @@ -49,6 +49,9 @@ } } }, + "reviewPaths": [ + "spec.type" + ], "failedPaths": [ "spec.type" ], diff --git a/rules/exposure-to-internet/test/failed_with_service_nodeport/expected.json b/rules/exposure-to-internet/test/failed_with_service_nodeport/expected.json index 53167dd6c..f7b9f7d97 100644 --- a/rules/exposure-to-internet/test/failed_with_service_nodeport/expected.json +++ b/rules/exposure-to-internet/test/failed_with_service_nodeport/expected.json @@ -39,6 +39,9 @@ "type": "NodePort" } }, + "reviewPaths": [ + "spec.type" + ], "failedPaths": [ "spec.type" ], diff --git a/rules/horizontalpodautoscaler-in-default-namespace/test/horizontalpodautoscaler/expected.json b/rules/horizontalpodautoscaler-in-default-namespace/test/horizontalpodautoscaler/expected.json index 8292048c0..ebd69e1bb 100644 --- a/rules/horizontalpodautoscaler-in-default-namespace/test/horizontalpodautoscaler/expected.json +++ b/rules/horizontalpodautoscaler-in-default-namespace/test/horizontalpodautoscaler/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "HorizontalPodAutoscaler: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/host-network-access/test/cronjob/expected.json b/rules/host-network-access/test/cronjob/expected.json index d8d348a34..2f03d58c8 100644 --- a/rules/host-network-access/test/cronjob/expected.json +++ b/rules/host-network-access/test/cronjob/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "CronJob: hello has a pod connected to the host network", + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.hostNetwork" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.hostNetwork" ], diff --git a/rules/host-network-access/test/pod/expected.json b/rules/host-network-access/test/pod/expected.json index e533ea29a..75c2753f3 100644 --- a/rules/host-network-access/test/pod/expected.json +++ b/rules/host-network-access/test/pod/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Pod: test is connected to the host network", + "deletePaths": [ + "spec.hostNetwork" + ], "failedPaths": [ "spec.hostNetwork" ], diff --git a/rules/host-network-access/test/workloads/expected.json b/rules/host-network-access/test/workloads/expected.json index 8c1efb482..4072332c4 100644 --- a/rules/host-network-access/test/workloads/expected.json +++ b/rules/host-network-access/test/workloads/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Deployment: my-deployment has a pod connected to the host network", + "deletePaths": [ + "spec.template.spec.hostNetwork" + ], "failedPaths": [ "spec.template.spec.hostNetwork" ], diff --git a/rules/host-pid-ipc-privileges/test/cronjob/expected.json b/rules/host-pid-ipc-privileges/test/cronjob/expected.json index 5e9a05aee..fbe463618 100644 --- a/rules/host-pid-ipc-privileges/test/cronjob/expected.json +++ b/rules/host-pid-ipc-privileges/test/cronjob/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "CronJob: hello has a pod with hostIPC enabled", + "deletePaths": ["spec.jobTemplate.spec.template.spec.hostIPC"], "failedPaths": ["spec.jobTemplate.spec.template.spec.hostIPC"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/host-pid-ipc-privileges/test/pod/expected.json 
b/rules/host-pid-ipc-privileges/test/pod/expected.json index 1e697ed90..b66d1f612 100644 --- a/rules/host-pid-ipc-privileges/test/pod/expected.json +++ b/rules/host-pid-ipc-privileges/test/pod/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Pod: test has hostPID enabled", + "deletePaths": ["spec.hostPID"], "failedPaths": ["spec.hostPID"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/host-pid-ipc-privileges/test/workload/expected.json b/rules/host-pid-ipc-privileges/test/workload/expected.json index 1aa6f25af..ca771da26 100644 --- a/rules/host-pid-ipc-privileges/test/workload/expected.json +++ b/rules/host-pid-ipc-privileges/test/workload/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Deployment: my-deployment has a pod with hostPID enabled", + "deletePaths": ["spec.template.spec.hostPID"], "failedPaths": ["spec.template.spec.hostPID"], "fixPaths": [], "ruleStatus": "", @@ -19,6 +20,7 @@ } }, { "alertMessage": "Deployment: my-deployment has a pod with hostIPC enabled", + "deletePaths": ["spec.template.spec.hostIPC"], "failedPaths": ["spec.template.spec.hostIPC"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/image-pull-policy-is-not-set-to-always/test/cronjob/expected.json b/rules/image-pull-policy-is-not-set-to-always/test/cronjob/expected.json index 875472f19..4ac54d87d 100644 --- a/rules/image-pull-policy-is-not-set-to-always/test/cronjob/expected.json +++ b/rules/image-pull-policy-is-not-set-to-always/test/cronjob/expected.json @@ -1,6 +1,10 @@ [ { "alertMessage": "container: php in cronjob: hello has 'latest' tag on image but imagePullPolicy is not set to 'Always'", + "reviewPaths": [ + "spec.jobTemplate.spec.template.spec.containers[1].image", + "spec.jobTemplate.spec.template.spec.containers[1].imagePullPolicy" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[1].image", "spec.jobTemplate.spec.template.spec.containers[1].imagePullPolicy" diff --git a/rules/image-pull-policy-is-not-set-to-always/test/pod/expected.json b/rules/image-pull-policy-is-not-set-to-always/test/pod/expected.json index 8b670bdd3..e3d856341 100644 --- a/rules/image-pull-policy-is-not-set-to-always/test/pod/expected.json +++ b/rules/image-pull-policy-is-not-set-to-always/test/pod/expected.json @@ -1,6 +1,10 @@ [ { "alertMessage": "container: test in pod: test has 'latest' tag on image but imagePullPolicy is not set to 'Always'", + "reviewPaths": [ + "spec.containers[0].image", + "spec.containers[0].imagePullPolicy" + ], "failedPaths": [ "spec.containers[0].image", "spec.containers[0].imagePullPolicy" diff --git a/rules/image-pull-policy-is-not-set-to-always/test/workload/expected.json b/rules/image-pull-policy-is-not-set-to-always/test/workload/expected.json index 5c79a2929..be4c65876 100644 --- a/rules/image-pull-policy-is-not-set-to-always/test/workload/expected.json +++ b/rules/image-pull-policy-is-not-set-to-always/test/workload/expected.json @@ -1,6 +1,10 @@ [ { "alertMessage": "container: mysql in Deployment: my-deployment has 'latest' tag on image but imagePullPolicy is not set to 'Always'", + "reviewPaths": [ + "spec.template.spec.containers[0].image", + "spec.template.spec.containers[0].imagePullPolicy" + ], "failedPaths": [ "spec.template.spec.containers[0].image", "spec.template.spec.containers[0].imagePullPolicy" diff --git a/rules/ingress-in-default-namespace/test/ingress/expected.json b/rules/ingress-in-default-namespace/test/ingress/expected.json index 4a040855b..383137cfe 100644 --- a/rules/ingress-in-default-namespace/test/ingress/expected.json +++ 
b/rules/ingress-in-default-namespace/test/ingress/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Ingress: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/insecure-capabilities/test/cronjob/expected.json b/rules/insecure-capabilities/test/cronjob/expected.json index e03da180f..cde076ba4 100644 --- a/rules/insecure-capabilities/test/cronjob/expected.json +++ b/rules/insecure-capabilities/test/cronjob/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "container: mysql in cronjob: hello have dangerous capabilities", + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].securityContext.capabilities.add[0]" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[0].securityContext.capabilities.add[0]" ], diff --git a/rules/insecure-capabilities/test/pod/expected.json b/rules/insecure-capabilities/test/pod/expected.json index c46a19734..1631fcccc 100644 --- a/rules/insecure-capabilities/test/pod/expected.json +++ b/rules/insecure-capabilities/test/pod/expected.json @@ -1,6 +1,10 @@ [ { "alertMessage": "container: test2 in pod: test have dangerous capabilities", + "deletePaths": [ + "spec.containers[1].securityContext.capabilities.add[0]", + "spec.containers[1].securityContext.capabilities.add[1]" + ], "failedPaths": [ "spec.containers[1].securityContext.capabilities.add[0]", "spec.containers[1].securityContext.capabilities.add[1]" diff --git a/rules/insecure-capabilities/test/workloads/expected.json b/rules/insecure-capabilities/test/workloads/expected.json index f08d87f4c..cb02491dc 100644 --- a/rules/insecure-capabilities/test/workloads/expected.json +++ b/rules/insecure-capabilities/test/workloads/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "container: php in workload: my-deployment have dangerous capabilities", + "deletePaths": [ + "spec.template.spec.containers[1].securityContext.capabilities.add[0]" + ], "failedPaths": [ "spec.template.spec.containers[1].securityContext.capabilities.add[0]" ], diff --git a/rules/insecure-port-flag/test/test/expected.json b/rules/insecure-port-flag/test/test/expected.json index 0c3505845..2048d964b 100644 --- a/rules/insecure-port-flag/test/test/expected.json +++ b/rules/insecure-port-flag/test/test/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "The API server container: kube-apiserver has insecure-port flag enabled", + "reviewPaths": [ + "spec.containers[0].command[11]" + ], "failedPaths": [ "spec.containers[0].command[11]" ], diff --git a/rules/k8s-audit-logs-enabled-native-cis/test/test-failed/expected.json b/rules/k8s-audit-logs-enabled-native-cis/test/test-failed/expected.json index 52d645e25..20ae19816 100644 --- a/rules/k8s-audit-logs-enabled-native-cis/test/test-failed/expected.json +++ b/rules/k8s-audit-logs-enabled-native-cis/test/test-failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "audit logs are not enabled", + "reviewPaths": [ + "spec.containers[0].command[11]" + ], "failedPaths": [ "spec.containers[0].command[11]" ], diff --git a/rules/k8s-audit-logs-enabled-native/test/test-failed/expected.json b/rules/k8s-audit-logs-enabled-native/test/test-failed/expected.json index 94e4a6581..10c154932 100644 --- a/rules/k8s-audit-logs-enabled-native/test/test-failed/expected.json +++ b/rules/k8s-audit-logs-enabled-native/test/test-failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "audit logs is not enabled", + "reviewPaths": [ + "spec.containers[0].command" + ], "failedPaths": [ 
"spec.containers[0].command" ], diff --git a/rules/kubelet-authorization-mode-alwaysAllow/raw.rego b/rules/kubelet-authorization-mode-alwaysAllow/raw.rego index 052efa4c2..b5387d85e 100644 --- a/rules/kubelet-authorization-mode-alwaysAllow/raw.rego +++ b/rules/kubelet-authorization-mode-alwaysAllow/raw.rego @@ -19,6 +19,7 @@ deny[msga] { msga := { "alertMessage": "Anonymous requests are enabled", "alertScore": 10, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -70,6 +71,7 @@ deny[msga] { msga := { "alertMessage": "Anonymous requests are enabled", "alertScore": 10, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -92,6 +94,7 @@ deny[msga] { msga := { "alertMessage": "Failed to analyze config file", "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", diff --git a/rules/kubelet-authorization-mode-alwaysAllow/test/fail-no-cli-and-config/expected.json b/rules/kubelet-authorization-mode-alwaysAllow/test/fail-no-cli-and-config/expected.json index 62840632b..501aa0fee 100644 --- a/rules/kubelet-authorization-mode-alwaysAllow/test/fail-no-cli-and-config/expected.json +++ b/rules/kubelet-authorization-mode-alwaysAllow/test/fail-no-cli-and-config/expected.json @@ -14,6 +14,7 @@ } }, "alertScore": 10, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "ruleStatus": "", diff --git a/rules/kubelet-authorization-mode-alwaysAllow/test/fail-sensor-failed/expected.json b/rules/kubelet-authorization-mode-alwaysAllow/test/fail-sensor-failed/expected.json index b2eca50fe..3bb1d7be7 100644 --- a/rules/kubelet-authorization-mode-alwaysAllow/test/fail-sensor-failed/expected.json +++ b/rules/kubelet-authorization-mode-alwaysAllow/test/fail-sensor-failed/expected.json @@ -12,6 +12,7 @@ } }, "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/kubelet-authorization-mode-alwaysAllow/test/invalid-cli-argument/expected.json b/rules/kubelet-authorization-mode-alwaysAllow/test/invalid-cli-argument/expected.json index ba0f96edc..db49befb9 100644 --- a/rules/kubelet-authorization-mode-alwaysAllow/test/invalid-cli-argument/expected.json +++ b/rules/kubelet-authorization-mode-alwaysAllow/test/invalid-cli-argument/expected.json @@ -14,6 +14,7 @@ } }, "alertScore": 10, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "ruleStatus": "", diff --git a/rules/kubelet-authorization-mode-alwaysAllow/test/invalid-config-value/expected.json b/rules/kubelet-authorization-mode-alwaysAllow/test/invalid-config-value/expected.json index d611bcc49..0a6a445d1 100644 --- a/rules/kubelet-authorization-mode-alwaysAllow/test/invalid-config-value/expected.json +++ b/rules/kubelet-authorization-mode-alwaysAllow/test/invalid-config-value/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 10, + "reviewPaths": [ + "authorization.mode" + ], "failedPaths": [ "authorization.mode" ], diff --git a/rules/kubelet-event-qps/raw.rego b/rules/kubelet-event-qps/raw.rego index ad0eed856..25eb0220a 100644 --- a/rules/kubelet-event-qps/raw.rego +++ b/rules/kubelet-event-qps/raw.rego @@ -51,6 +51,7 @@ deny[msga] { msga := { "alertMessage": "Failed to analyze config file", "alertScore": 2, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", diff --git a/rules/kubelet-event-qps/test/fail-eventRecordQPS=0-config/expected.json b/rules/kubelet-event-qps/test/fail-eventRecordQPS=0-config/expected.json index 
d179f9208..2caf95c77 100644 --- a/rules/kubelet-event-qps/test/fail-eventRecordQPS=0-config/expected.json +++ b/rules/kubelet-event-qps/test/fail-eventRecordQPS=0-config/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 2, + "reviewPaths": [ + "eventRecordQPS" + ], "failedPaths": [ "eventRecordQPS" ], diff --git a/rules/kubelet-event-qps/test/fail-sensor-failed/expected.json b/rules/kubelet-event-qps/test/fail-sensor-failed/expected.json index 141e00b00..49a7cc679 100644 --- a/rules/kubelet-event-qps/test/fail-sensor-failed/expected.json +++ b/rules/kubelet-event-qps/test/fail-sensor-failed/expected.json @@ -12,6 +12,7 @@ } }, "alertScore": 2, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/kubelet-ip-tables/raw.rego b/rules/kubelet-ip-tables/raw.rego index 0373e1f6b..dc706b2d5 100644 --- a/rules/kubelet-ip-tables/raw.rego +++ b/rules/kubelet-ip-tables/raw.rego @@ -18,6 +18,7 @@ deny[msga] { msga := { "alertMessage": "Argument --make-iptables-util-chains is not set to true.", "alertScore": 3, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -69,6 +70,7 @@ deny[msga] { msga := { "alertMessage": "Failed to analyze config file", "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", diff --git a/rules/kubelet-ip-tables/test/fail-sensor-failed/expected.json b/rules/kubelet-ip-tables/test/fail-sensor-failed/expected.json index b2eca50fe..3bb1d7be7 100644 --- a/rules/kubelet-ip-tables/test/fail-sensor-failed/expected.json +++ b/rules/kubelet-ip-tables/test/fail-sensor-failed/expected.json @@ -12,6 +12,7 @@ } }, "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/kubelet-ip-tables/test/fail-set-via-cli/expected.json b/rules/kubelet-ip-tables/test/fail-set-via-cli/expected.json index d7611546b..aada67a9a 100644 --- a/rules/kubelet-ip-tables/test/fail-set-via-cli/expected.json +++ b/rules/kubelet-ip-tables/test/fail-set-via-cli/expected.json @@ -14,6 +14,7 @@ } }, "alertScore": 3, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "ruleStatus": "", diff --git a/rules/kubelet-ip-tables/test/fail-set-via-config/expected.json b/rules/kubelet-ip-tables/test/fail-set-via-config/expected.json index 96d2dc983..3fb7f6895 100644 --- a/rules/kubelet-ip-tables/test/fail-set-via-config/expected.json +++ b/rules/kubelet-ip-tables/test/fail-set-via-config/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 3, + "reviewPaths": [ + "makeIPTablesUtilChains" + ], "failedPaths": [ "makeIPTablesUtilChains" ], diff --git a/rules/kubelet-protect-kernel-defaults/raw.rego b/rules/kubelet-protect-kernel-defaults/raw.rego index 963ccc6fc..35b25fc41 100644 --- a/rules/kubelet-protect-kernel-defaults/raw.rego +++ b/rules/kubelet-protect-kernel-defaults/raw.rego @@ -18,6 +18,7 @@ deny[msga] { msga := { "alertMessage": "Argument --protect-kernel-defaults is not set to true.", "alertScore": 2, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -68,6 +69,7 @@ deny[msga] { msga := { "alertMessage": "Argument --protect-kernel-defaults is not set to true.", "alertScore": 2, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -90,6 +92,7 @@ deny[msga] { msga := { "alertMessage": "Failed to analyze config file", "alertScore": 2, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", diff --git 
a/rules/kubelet-protect-kernel-defaults/test/deny-config-file-false/expected.json b/rules/kubelet-protect-kernel-defaults/test/deny-config-file-false/expected.json index b8a32681c..d12c18516 100644 --- a/rules/kubelet-protect-kernel-defaults/test/deny-config-file-false/expected.json +++ b/rules/kubelet-protect-kernel-defaults/test/deny-config-file-false/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 2, + "reviewPaths": [ + "protectKernelDefaults" + ], "failedPaths": [ "protectKernelDefaults" ], diff --git a/rules/kubelet-protect-kernel-defaults/test/fail-no-config-and-cli/expected.json b/rules/kubelet-protect-kernel-defaults/test/fail-no-config-and-cli/expected.json index ca21f3c4f..df74ca04f 100644 --- a/rules/kubelet-protect-kernel-defaults/test/fail-no-config-and-cli/expected.json +++ b/rules/kubelet-protect-kernel-defaults/test/fail-no-config-and-cli/expected.json @@ -14,6 +14,7 @@ } }, "alertScore": 2, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/kubelet-protect-kernel-defaults/test/fail-set-via-cli/expected.json b/rules/kubelet-protect-kernel-defaults/test/fail-set-via-cli/expected.json index f8f43865f..3ff0abe09 100644 --- a/rules/kubelet-protect-kernel-defaults/test/fail-set-via-cli/expected.json +++ b/rules/kubelet-protect-kernel-defaults/test/fail-set-via-cli/expected.json @@ -14,6 +14,7 @@ } }, "alertScore": 2, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/kubelet-rotate-certificates/raw.rego b/rules/kubelet-rotate-certificates/raw.rego index bbe633709..c6c53e5ad 100644 --- a/rules/kubelet-rotate-certificates/raw.rego +++ b/rules/kubelet-rotate-certificates/raw.rego @@ -18,6 +18,7 @@ deny[msga] { msga := { "alertMessage": "Kubelet client certificates rotation is disabled", "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -69,6 +70,7 @@ deny[msga] { msga := { "alertMessage": "Failed to analyze config file", "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", diff --git a/rules/kubelet-rotate-certificates/test/fail-cli-argument-set-false/expected.json b/rules/kubelet-rotate-certificates/test/fail-cli-argument-set-false/expected.json index cf48e1af8..b2ebc760d 100644 --- a/rules/kubelet-rotate-certificates/test/fail-cli-argument-set-false/expected.json +++ b/rules/kubelet-rotate-certificates/test/fail-cli-argument-set-false/expected.json @@ -14,6 +14,7 @@ } }, "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "ruleStatus": "", diff --git a/rules/kubelet-rotate-certificates/test/fail-sensor-failed/expected.json b/rules/kubelet-rotate-certificates/test/fail-sensor-failed/expected.json index b2eca50fe..3bb1d7be7 100644 --- a/rules/kubelet-rotate-certificates/test/fail-sensor-failed/expected.json +++ b/rules/kubelet-rotate-certificates/test/fail-sensor-failed/expected.json @@ -12,6 +12,7 @@ } }, "alertScore": 6, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/kubelet-rotate-certificates/test/fail-set-false-via-config-file/expected.json b/rules/kubelet-rotate-certificates/test/fail-set-false-via-config-file/expected.json index b8ff51b1a..7b20fa052 100644 --- a/rules/kubelet-rotate-certificates/test/fail-set-false-via-config-file/expected.json +++ b/rules/kubelet-rotate-certificates/test/fail-set-false-via-config-file/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 6, + 
"reviewPaths": [ + "rotateCertificates" + ], "failedPaths": [ "rotateCertificates" ], diff --git a/rules/kubelet-streaming-connection-idle-timeout/raw.rego b/rules/kubelet-streaming-connection-idle-timeout/raw.rego index 86532b50c..37157385c 100644 --- a/rules/kubelet-streaming-connection-idle-timeout/raw.rego +++ b/rules/kubelet-streaming-connection-idle-timeout/raw.rego @@ -18,6 +18,7 @@ deny[msga] { msga := { "alertMessage": "Timeouts on streaming connections are enabled", "alertScore": 3, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -69,6 +70,7 @@ deny[msga] { msga := { "alertMessage": "Failed to analyze config file", "alertScore": 3, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", diff --git a/rules/kubelet-streaming-connection-idle-timeout/test/fail-config-file/expected.json b/rules/kubelet-streaming-connection-idle-timeout/test/fail-config-file/expected.json index c59f4c362..03049ff53 100644 --- a/rules/kubelet-streaming-connection-idle-timeout/test/fail-config-file/expected.json +++ b/rules/kubelet-streaming-connection-idle-timeout/test/fail-config-file/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 3, + "reviewPaths": [ + "streamingConnectionIdleTimeout" + ], "failedPaths": [ "streamingConnectionIdleTimeout" ], diff --git a/rules/kubelet-streaming-connection-idle-timeout/test/fail-sensor-failed/expected.json b/rules/kubelet-streaming-connection-idle-timeout/test/fail-sensor-failed/expected.json index eca26bacd..4f6d2acb3 100644 --- a/rules/kubelet-streaming-connection-idle-timeout/test/fail-sensor-failed/expected.json +++ b/rules/kubelet-streaming-connection-idle-timeout/test/fail-sensor-failed/expected.json @@ -12,6 +12,7 @@ } }, "alertScore": 3, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/kubelet-streaming-connection-idle-timeout/test/fail-set-via-cli/expected.json b/rules/kubelet-streaming-connection-idle-timeout/test/fail-set-via-cli/expected.json index 6c4d42f08..f0b8d262b 100644 --- a/rules/kubelet-streaming-connection-idle-timeout/test/fail-set-via-cli/expected.json +++ b/rules/kubelet-streaming-connection-idle-timeout/test/fail-set-via-cli/expected.json @@ -9,6 +9,7 @@ "kind": "KubeletInfo" }, "alertScore": 3, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins" diff --git a/rules/kubelet-strong-cryptography-ciphers/raw.rego b/rules/kubelet-strong-cryptography-ciphers/raw.rego index 5871f6968..6f1e057c0 100644 --- a/rules/kubelet-strong-cryptography-ciphers/raw.rego +++ b/rules/kubelet-strong-cryptography-ciphers/raw.rego @@ -19,6 +19,7 @@ deny[msga] { msga := { "alertMessage": "Kubelet is not configured to only use strong cryptographic ciphers", "alertScore": 5, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", @@ -71,6 +72,7 @@ deny[msga] { msga := { "alertMessage": "Kubelet is not configured to only use strong cryptographic ciphers", "alertScore": 5, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", diff --git a/rules/kubelet-strong-cryptography-ciphers/test/fail-cli-and-config-not-set/expected.json b/rules/kubelet-strong-cryptography-ciphers/test/fail-cli-and-config-not-set/expected.json index 546cf33e7..5bfe968bb 100644 --- a/rules/kubelet-strong-cryptography-ciphers/test/fail-cli-and-config-not-set/expected.json +++ b/rules/kubelet-strong-cryptography-ciphers/test/fail-cli-and-config-not-set/expected.json @@ 
-14,6 +14,7 @@ } }, "alertScore": 5, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "ruleStatus": "", diff --git a/rules/kubelet-strong-cryptography-ciphers/test/fail-cli/expected.json b/rules/kubelet-strong-cryptography-ciphers/test/fail-cli/expected.json index 5179ac94a..8303f834f 100644 --- a/rules/kubelet-strong-cryptography-ciphers/test/fail-cli/expected.json +++ b/rules/kubelet-strong-cryptography-ciphers/test/fail-cli/expected.json @@ -14,6 +14,7 @@ } }, "alertScore": 5, + "reviewPaths": [], "failedPaths": [], "fixPaths": [], "ruleStatus": "", diff --git a/rules/kubelet-strong-cryptography-ciphers/test/fail-config-not-supported-value/expected.json b/rules/kubelet-strong-cryptography-ciphers/test/fail-config-not-supported-value/expected.json index a3acada9d..f5219c7e4 100644 --- a/rules/kubelet-strong-cryptography-ciphers/test/fail-config-not-supported-value/expected.json +++ b/rules/kubelet-strong-cryptography-ciphers/test/fail-config-not-supported-value/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 5, + "reviewPaths": [ + "TLSCipherSuites" + ], "failedPaths": [ "TLSCipherSuites" ], diff --git a/rules/lease-in-default-namespace/test/lease/expected.json b/rules/lease-in-default-namespace/test/lease/expected.json index fb727cdb2..79d9a2a35 100644 --- a/rules/lease-in-default-namespace/test/lease/expected.json +++ b/rules/lease-in-default-namespace/test/lease/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Lease: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/non-root-containers/test/cronjob-fixed-path/expected.json b/rules/non-root-containers/test/cronjob-fixed-path/expected.json index 55e04cef0..435eacac0 100644 --- a/rules/non-root-containers/test/cronjob-fixed-path/expected.json +++ b/rules/non-root-containers/test/cronjob-fixed-path/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "container :hello in CronJob: hello may run as root", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsNonRoot", @@ -22,6 +23,7 @@ } }, { "alertMessage": "container :hello2 in CronJob: hello may run as root", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsNonRoot", diff --git a/rules/non-root-containers/test/cronjob/expected.json b/rules/non-root-containers/test/cronjob/expected.json index 2fd82b7a9..3a60e688a 100644 --- a/rules/non-root-containers/test/cronjob/expected.json +++ b/rules/non-root-containers/test/cronjob/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "container :hello in CronJob: hello may run as root", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsNonRoot", @@ -22,6 +23,7 @@ } }, { "alertMessage": "container :hello2 in CronJob: hello may run as root", + "reviewPaths": ["spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsNonRoot"], "failedPaths": ["spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsNonRoot"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/non-root-containers/test/deployment-fixed-path/expected.json b/rules/non-root-containers/test/deployment-fixed-path/expected.json index 5214ad4f2..3b8482116 100644 --- a/rules/non-root-containers/test/deployment-fixed-path/expected.json +++ b/rules/non-root-containers/test/deployment-fixed-path/expected.json @@ -1,5 
+1,6 @@ [{ "alertMessage": "container: web in pod: static-web may run as root", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.containers[0].securityContext.runAsNonRoot", diff --git a/rules/non-root-containers/test/deployment/expected.json b/rules/non-root-containers/test/deployment/expected.json index 3cee13298..4b8126ff6 100644 --- a/rules/non-root-containers/test/deployment/expected.json +++ b/rules/non-root-containers/test/deployment/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "container :nginx in Deployment: nginx-deployment may run as root", + "reviewPaths": ["spec.template.spec.containers[0].securityContext.runAsUser"], "failedPaths": ["spec.template.spec.containers[0].securityContext.runAsUser"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/non-root-containers/test/pod/expected.json b/rules/non-root-containers/test/pod/expected.json index 5214ad4f2..3b8482116 100644 --- a/rules/non-root-containers/test/pod/expected.json +++ b/rules/non-root-containers/test/pod/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "container: web in pod: static-web may run as root", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.containers[0].securityContext.runAsNonRoot", diff --git a/rules/persistentvolumeclaim-in-default-namespace/test/persistentvolumeclaim/expected.json b/rules/persistentvolumeclaim-in-default-namespace/test/persistentvolumeclaim/expected.json index 2879c2c7c..e03ee3612 100644 --- a/rules/persistentvolumeclaim-in-default-namespace/test/persistentvolumeclaim/expected.json +++ b/rules/persistentvolumeclaim-in-default-namespace/test/persistentvolumeclaim/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PersistentVolumeClaim: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/poddisruptionbudget-in-default-namespace/test/poddisruptionbudget/expected.json b/rules/poddisruptionbudget-in-default-namespace/test/poddisruptionbudget/expected.json index 7ac71c1eb..865c4db6c 100644 --- a/rules/poddisruptionbudget-in-default-namespace/test/poddisruptionbudget/expected.json +++ b/rules/poddisruptionbudget-in-default-namespace/test/poddisruptionbudget/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodDisruptionBudget: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/pods-in-default-namespace/test/cronjob/expected.json b/rules/pods-in-default-namespace/test/cronjob/expected.json index e918da7bd..586b1c277 100644 --- a/rules/pods-in-default-namespace/test/cronjob/expected.json +++ b/rules/pods-in-default-namespace/test/cronjob/expected.json @@ -1,6 +1,7 @@ [{ "alertMessage": "CronJob: hello has pods running in the 'default' namespace", "fixPaths": [], + "reviewPaths": ["metadata.namespace"], "failedPaths": ["metadata.namespace"], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/pods-in-default-namespace/test/pod/expected.json b/rules/pods-in-default-namespace/test/pod/expected.json index 25d0a4afd..88d988ed9 100644 --- a/rules/pods-in-default-namespace/test/pod/expected.json +++ b/rules/pods-in-default-namespace/test/pod/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Pod: envar-demo has pods running in the 'default' namespace", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/pods-in-default-namespace/test/workload/expected.json b/rules/pods-in-default-namespace/test/workload/expected.json 
index 9c6cea39b..d77fdf0af 100644 --- a/rules/pods-in-default-namespace/test/workload/expected.json +++ b/rules/pods-in-default-namespace/test/workload/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Deployment: test has pods running in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/podtemplate-in-default-namespace/test/podtemplate/expected.json b/rules/podtemplate-in-default-namespace/test/podtemplate/expected.json index 3d0b0addd..a622dfa80 100644 --- a/rules/podtemplate-in-default-namespace/test/podtemplate/expected.json +++ b/rules/podtemplate-in-default-namespace/test/podtemplate/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodTemplate: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/psp-deny-allowed-capabilities/test/fail-many-true/expected.json b/rules/psp-deny-allowed-capabilities/test/fail-many-true/expected.json index 4d6c7b068..2531fa8e0 100644 --- a/rules/psp-deny-allowed-capabilities/test/fail-many-true/expected.json +++ b/rules/psp-deny-allowed-capabilities/test/fail-many-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has allowedCapabilities.", + "deletePaths": [ + "spec.allowedCapabilities" + ], "failedPaths": [ "spec.allowedCapabilities" ], @@ -26,6 +29,9 @@ }, { "alertMessage": "PodSecurityPolicy: 'eks.privileged1' has allowedCapabilities.", + "deletePaths": [ + "spec.allowedCapabilities" + ], "failedPaths": [ "spec.allowedCapabilities" ], diff --git a/rules/psp-deny-allowed-capabilities/test/fail-only-one-true/expected.json b/rules/psp-deny-allowed-capabilities/test/fail-only-one-true/expected.json index b633e668e..e6253fbe6 100644 --- a/rules/psp-deny-allowed-capabilities/test/fail-only-one-true/expected.json +++ b/rules/psp-deny-allowed-capabilities/test/fail-only-one-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has allowedCapabilities.", + "deletePaths": [ + "spec.allowedCapabilities" + ], "failedPaths": [ "spec.allowedCapabilities" ], diff --git a/rules/psp-deny-allowprivilegeescalation/test/fail-many-true/expected.json b/rules/psp-deny-allowprivilegeescalation/test/fail-many-true/expected.json index 78877ccb0..0d87c6515 100644 --- a/rules/psp-deny-allowprivilegeescalation/test/fail-many-true/expected.json +++ b/rules/psp-deny-allowprivilegeescalation/test/fail-many-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has allowPrivilegeEscalation set as true.", + "deletePaths": [ + "spec.allowPrivilegeEscalation" + ], "failedPaths": [ "spec.allowPrivilegeEscalation" ], @@ -26,6 +29,9 @@ }, { "alertMessage": "PodSecurityPolicy: 'eks.privileged1' has allowPrivilegeEscalation set as true.", + "deletePaths": [ + "spec.allowPrivilegeEscalation" + ], "failedPaths": [ "spec.allowPrivilegeEscalation" ], diff --git a/rules/psp-deny-allowprivilegeescalation/test/fail-only-one-true/expected.json b/rules/psp-deny-allowprivilegeescalation/test/fail-only-one-true/expected.json index 6c69f9c40..c5b62a58e 100644 --- a/rules/psp-deny-allowprivilegeescalation/test/fail-only-one-true/expected.json +++ b/rules/psp-deny-allowprivilegeescalation/test/fail-only-one-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has allowPrivilegeEscalation set as true.", + "deletePaths": [ + "spec.allowPrivilegeEscalation" + ], "failedPaths": 
[ "spec.allowPrivilegeEscalation" ], diff --git a/rules/psp-deny-hostipc/test/fail-many-true/expected.json b/rules/psp-deny-hostipc/test/fail-many-true/expected.json index af8375778..a3f050beb 100644 --- a/rules/psp-deny-hostipc/test/fail-many-true/expected.json +++ b/rules/psp-deny-hostipc/test/fail-many-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has hostIPC set as true.", + "deletePaths": [ + "spec.hostIPC" + ], "failedPaths": [ "spec.hostIPC" ], @@ -26,6 +29,9 @@ }, { "alertMessage": "PodSecurityPolicy: 'eks.privileged1' has hostIPC set as true.", + "deletePaths": [ + "spec.hostIPC" + ], "failedPaths": [ "spec.hostIPC" ], diff --git a/rules/psp-deny-hostipc/test/fail-only-one-true/expected.json b/rules/psp-deny-hostipc/test/fail-only-one-true/expected.json index c13c18869..cec348136 100644 --- a/rules/psp-deny-hostipc/test/fail-only-one-true/expected.json +++ b/rules/psp-deny-hostipc/test/fail-only-one-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has hostIPC set as true.", + "deletePaths": [ + "spec.hostIPC" + ], "failedPaths": [ "spec.hostIPC" ], diff --git a/rules/psp-deny-hostnetwork/test/fail-many-true/expected.json b/rules/psp-deny-hostnetwork/test/fail-many-true/expected.json index c3c022f42..fd31bead1 100644 --- a/rules/psp-deny-hostnetwork/test/fail-many-true/expected.json +++ b/rules/psp-deny-hostnetwork/test/fail-many-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has hostNetwork set as true.", + "deletePaths": [ + "spec.hostNetwork" + ], "failedPaths": [ "spec.hostNetwork" ], @@ -26,6 +29,9 @@ }, { "alertMessage": "PodSecurityPolicy: 'eks.privileged1' has hostNetwork set as true.", + "deletePaths": [ + "spec.hostNetwork" + ], "failedPaths": [ "spec.hostNetwork" ], diff --git a/rules/psp-deny-hostnetwork/test/fail-only-one-true/expected.json b/rules/psp-deny-hostnetwork/test/fail-only-one-true/expected.json index 2d6598a06..257906445 100644 --- a/rules/psp-deny-hostnetwork/test/fail-only-one-true/expected.json +++ b/rules/psp-deny-hostnetwork/test/fail-only-one-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has hostNetwork set as true.", + "deletePaths": [ + "spec.hostNetwork" + ], "failedPaths": [ "spec.hostNetwork" ], diff --git a/rules/psp-deny-hostpid/test/fail-many-true/expected.json b/rules/psp-deny-hostpid/test/fail-many-true/expected.json index c3ce6928a..cb1be0601 100644 --- a/rules/psp-deny-hostpid/test/fail-many-true/expected.json +++ b/rules/psp-deny-hostpid/test/fail-many-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has hostPID set as true.", + "deletePaths": [ + "spec.hostPID" + ], "failedPaths": [ "spec.hostPID" ], @@ -26,6 +29,9 @@ }, { "alertMessage": "PodSecurityPolicy: 'eks.privileged1' has hostPID set as true.", + "deletePaths": [ + "spec.hostPID" + ], "failedPaths": [ "spec.hostPID" ], diff --git a/rules/psp-deny-hostpid/test/fail-only-one-true/expected.json b/rules/psp-deny-hostpid/test/fail-only-one-true/expected.json index 20b297e64..9c7340fc7 100644 --- a/rules/psp-deny-hostpid/test/fail-only-one-true/expected.json +++ b/rules/psp-deny-hostpid/test/fail-only-one-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has hostPID set as true.", + "deletePaths": [ + "spec.hostPID" + ], "failedPaths": [ "spec.hostPID" ], diff --git 
a/rules/psp-deny-privileged-container/test/fail-many-true/expected.json b/rules/psp-deny-privileged-container/test/fail-many-true/expected.json index fd5515975..fab7a4d0c 100644 --- a/rules/psp-deny-privileged-container/test/fail-many-true/expected.json +++ b/rules/psp-deny-privileged-container/test/fail-many-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has privileged set as true.", + "deletePaths": [ + "spec.privileged" + ], "failedPaths": [ "spec.privileged" ], @@ -26,6 +29,9 @@ }, { "alertMessage": "PodSecurityPolicy: 'eks.privileged1' has privileged set as true.", + "deletePaths": [ + "spec.privileged" + ], "failedPaths": [ "spec.privileged" ], diff --git a/rules/psp-deny-privileged-container/test/fail-only-one-true/expected.json b/rules/psp-deny-privileged-container/test/fail-only-one-true/expected.json index 15c18b4cd..1a96548f7 100644 --- a/rules/psp-deny-privileged-container/test/fail-only-one-true/expected.json +++ b/rules/psp-deny-privileged-container/test/fail-only-one-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' has privileged set as true.", + "deletePaths": [ + "spec.privileged" + ], "failedPaths": [ "spec.privileged" ], diff --git a/rules/psp-deny-root-container/test/fail-many-true/expected.json b/rules/psp-deny-root-container/test/fail-many-true/expected.json index 01a89a6db..ec899eb5e 100644 --- a/rules/psp-deny-root-container/test/fail-many-true/expected.json +++ b/rules/psp-deny-root-container/test/fail-many-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' permits containers to run as the root user.", + "deletePaths": [ + "spec.runAsUser.rule" + ], "failedPaths": [ "spec.runAsUser.rule" ], @@ -26,6 +29,9 @@ }, { "alertMessage": "PodSecurityPolicy: 'eks.privileged1' permits containers to run as the root user.", + "deletePaths": [ + "spec.runAsUser.rule" + ], "failedPaths": [ "spec.runAsUser.rule" ], diff --git a/rules/psp-deny-root-container/test/fail-only-one-true/expected.json b/rules/psp-deny-root-container/test/fail-only-one-true/expected.json index 8618f025a..6246d7a6e 100644 --- a/rules/psp-deny-root-container/test/fail-only-one-true/expected.json +++ b/rules/psp-deny-root-container/test/fail-only-one-true/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy: 'eks.privileged' permits containers to run as the root user.", + "deletePaths": [ + "spec.runAsUser.rule" + ], "failedPaths": [ "spec.runAsUser.rule" ], diff --git a/rules/psp-enabled-native/test/test-failed/expected.json b/rules/psp-enabled-native/test/test-failed/expected.json index 537c6d491..20f42ac92 100644 --- a/rules/psp-enabled-native/test/test-failed/expected.json +++ b/rules/psp-enabled-native/test/test-failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "PodSecurityPolicy is not enabled", + "reviewPaths": [ + "spec.containers[0].command[5]" + ], "failedPaths": [ "spec.containers[0].command[5]" ], diff --git a/rules/rbac-enabled-cloud/test/failed/expected.json b/rules/rbac-enabled-cloud/test/failed/expected.json index 0c6c87b67..7b49d0712 100644 --- a/rules/rbac-enabled-cloud/test/failed/expected.json +++ b/rules/rbac-enabled-cloud/test/failed/expected.json @@ -3,6 +3,7 @@ "alertMessage": "rbac is not enabled", "alertScore": 3, "packagename": "armo_builtins", + "reviewPaths": ["data.properties.enableRBAC"], "failedPaths": ["data.properties.enableRBAC"], "fixCommand": "", "fixPaths": [], diff --git 
a/rules/read-only-port-enabled-updated/test/config-fail/expected.json b/rules/read-only-port-enabled-updated/test/config-fail/expected.json index 64864b2ce..d9b986492 100644 --- a/rules/read-only-port-enabled-updated/test/config-fail/expected.json +++ b/rules/read-only-port-enabled-updated/test/config-fail/expected.json @@ -16,6 +16,9 @@ } }, "alertScore": 4, + "reviewPaths": [ + "readOnlyPort" + ], "failedPaths": [ "readOnlyPort" ], diff --git a/rules/replicationcontroller-in-default-namespace/test/replicationcontroller/expected.json b/rules/replicationcontroller-in-default-namespace/test/replicationcontroller/expected.json index eaae124e9..b20af5ca4 100644 --- a/rules/replicationcontroller-in-default-namespace/test/replicationcontroller/expected.json +++ b/rules/replicationcontroller-in-default-namespace/test/replicationcontroller/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "ReplicationController: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/resources-cpu-limit-and-request/raw.rego b/rules/resources-cpu-limit-and-request/raw.rego index 114962d76..2baf4abb2 100644 --- a/rules/resources-cpu-limit-and-request/raw.rego +++ b/rules/resources-cpu-limit-and-request/raw.rego @@ -14,6 +14,7 @@ deny[msga] { "alertMessage": sprintf("Container: %v does not have CPU-limit or request", [ container.name]), "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { @@ -36,6 +37,7 @@ deny[msga] { "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { @@ -57,6 +59,7 @@ deny[msga] { "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { @@ -79,6 +82,7 @@ deny[msga] { "alertMessage": sprintf("Container: %v does not have CPU-limit or request", [ container.name]), "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { @@ -101,6 +105,7 @@ deny[msga] { "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { @@ -122,6 +127,7 @@ deny[msga] { "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { diff --git a/rules/resources-cpu-limit-and-request/test/cronjob/expected.json b/rules/resources-cpu-limit-and-request/test/cronjob/expected.json index e03f943e8..07f3941e9 100644 --- a/rules/resources-cpu-limit-and-request/test/cronjob/expected.json +++ b/rules/resources-cpu-limit-and-request/test/cronjob/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Container: hello in CronJob: hello does not have CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { @@ -25,6 +26,7 @@ }, { "alertMessage": "Container: hello in CronJob: hello does not have 
CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json b/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json index a19179dff..0774d1458 100644 --- a/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json +++ b/rules/resources-cpu-limit-and-request/test/pod-only-limits/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths" : [{"path":"spec.containers[1].resources.limits.cpu", "value": "YOUR_VALUE"}], "ruleStatus": "", diff --git a/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json b/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json index 8a0bba75c..83beae079 100644 --- a/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json +++ b/rules/resources-cpu-limit-and-request/test/pod-only-requests/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths" : [{"path": "spec.containers[1].resources.requests.cpu", "value": "YOUR_VALUE"}], "ruleStatus": "", diff --git a/rules/resources-cpu-limit-and-request/test/pod/expected.json b/rules/resources-cpu-limit-and-request/test/pod/expected.json index 08f0190f9..08d1752e1 100644 --- a/rules/resources-cpu-limit-and-request/test/pod/expected.json +++ b/rules/resources-cpu-limit-and-request/test/pod/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { @@ -25,6 +26,7 @@ }, { "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json b/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json index ee7dd0f9e..a5a1d7bd5 100644 --- a/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json +++ b/rules/resources-cpu-limit-and-request/test/workload-exceeded/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Container: health-check in Deployment: health-check-deployment does not have CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { @@ -25,6 +26,10 @@ }, { "alertMessage": "Container: health-check in Deployment: health-check-deployment exceeds CPU-limit or request", + "reviewPaths": [ + "spec.template.spec.containers[0].resources.limits.cpu" + ], + "failedPaths": [ "spec.template.spec.containers[0].resources.limits.cpu" ], diff --git a/rules/resources-cpu-limit-and-request/test/workload/expected.json b/rules/resources-cpu-limit-and-request/test/workload/expected.json index 0d4c0c19e..3cdc23f4e 100644 --- a/rules/resources-cpu-limit-and-request/test/workload/expected.json +++ b/rules/resources-cpu-limit-and-request/test/workload/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { @@ -28,6 +29,7 @@ }, { "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/resources-memory-limit-and-request/test/workload-exceeded/expected.json 
b/rules/resources-memory-limit-and-request/test/workload-exceeded/expected.json index fb8d24b50..15a317325 100644 --- a/rules/resources-memory-limit-and-request/test/workload-exceeded/expected.json +++ b/rules/resources-memory-limit-and-request/test/workload-exceeded/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Container: log-aggregator in Deployment: test exceeds memory request", + "reviewPaths": [ + "spec.template.spec.containers[0].resources.requests.memory" + ], "failedPaths": [ "spec.template.spec.containers[0].resources.requests.memory" ], @@ -25,6 +28,9 @@ }, { "alertMessage": "Container: log-aggregator in Deployment: test exceeds memory-limit", + "reviewPaths": [ + "spec.template.spec.containers[0].resources.limits.memory" + ], "failedPaths": [ "spec.template.spec.containers[0].resources.limits.memory" ], diff --git a/rules/resources-secret-in-default-namespace/test/configmap/expected.json b/rules/resources-secret-in-default-namespace/test/configmap/expected.json index 656beee22..e7cec61ce 100644 --- a/rules/resources-secret-in-default-namespace/test/configmap/expected.json +++ b/rules/resources-secret-in-default-namespace/test/configmap/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "ConfigMap: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/restrict-access-to-the-control-plane-endpoint/raw.rego b/rules/restrict-access-to-the-control-plane-endpoint/raw.rego index 14eae0211..eab684e2b 100644 --- a/rules/restrict-access-to-the-control-plane-endpoint/raw.rego +++ b/rules/restrict-access-to-the-control-plane-endpoint/raw.rego @@ -15,6 +15,7 @@ deny[msga] { "alertMessage": "Parameter 'authorizedIPRanges' was not set.", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths":[], "fixCommand": "az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'", diff --git a/rules/restrict-access-to-the-control-plane-endpoint/test/failed/expected.json b/rules/restrict-access-to-the-control-plane-endpoint/test/failed/expected.json index e1dbb550c..f24ffbbc6 100644 --- a/rules/restrict-access-to-the-control-plane-endpoint/test/failed/expected.json +++ b/rules/restrict-access-to-the-control-plane-endpoint/test/failed/expected.json @@ -3,6 +3,7 @@ "alertMessage": "Parameter 'authorizedIPRanges' was not set.", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths":[], "fixCommand": "az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'", diff --git a/rules/role-in-default-namespace/test/role/expected.json b/rules/role-in-default-namespace/test/role/expected.json index 8f796889c..dfd319155 100644 --- a/rules/role-in-default-namespace/test/role/expected.json +++ b/rules/role-in-default-namespace/test/role/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Role: allow-port-forward is in the 'default' namespace", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { diff --git a/rules/rolebinding-in-default-namespace/test/rolebinding/expected.json b/rules/rolebinding-in-default-namespace/test/rolebinding/expected.json index ae41833c3..38d0687b1 100644 --- a/rules/rolebinding-in-default-namespace/test/rolebinding/expected.json +++ b/rules/rolebinding-in-default-namespace/test/rolebinding/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "RoleBinding: pod is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], 
diff --git a/rules/rule-access-dashboard-subject-v1/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-access-dashboard-subject-v1/test/clusterrole-clusterrolebinding/expected.json index e935b1b80..c9f474816 100644 --- a/rules/rule-access-dashboard-subject-v1/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-access-dashboard-subject-v1/test/clusterrole-clusterrolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: Group-manager is bound to dashboard role/clusterrole", + "reviewPaths": ["relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", @@ -46,6 +47,7 @@ } }, { "alertMessage": "Subject: Group-manager is bound to dashboard role/clusterrole", + "reviewPaths": ["relatedObjects[0].subjects[1]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[0].subjects[1]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", @@ -92,6 +94,7 @@ } }, { "alertMessage": "Subject: Group-dev is bound to dashboard role/clusterrole", + "reviewPaths": ["relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", @@ -138,6 +141,7 @@ } }, { "alertMessage": "Subject: Group-dev is bound to dashboard role/clusterrole", + "reviewPaths": ["relatedObjects[0].subjects[1]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[0].subjects[1]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-access-dashboard-subject-v1/test/clusterrole-rolebinding/expected.json b/rules/rule-access-dashboard-subject-v1/test/clusterrole-rolebinding/expected.json index 2e57893fe..97b9222f4 100644 --- a/rules/rule-access-dashboard-subject-v1/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-access-dashboard-subject-v1/test/clusterrole-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane is bound to dashboard role/clusterrole", + "reviewPaths": ["relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-access-dashboard-subject-v1/test/role-rolebinding/expected.json b/rules/rule-access-dashboard-subject-v1/test/role-rolebinding/expected.json index 1fb43ff8e..4fe11acf5 100644 --- a/rules/rule-access-dashboard-subject-v1/test/role-rolebinding/expected.json +++ b/rules/rule-access-dashboard-subject-v1/test/role-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane is bound to dashboard role/clusterrole", + "reviewPaths": ["relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-access-dashboard-wl-v1/test/workload/expected.json b/rules/rule-access-dashboard-wl-v1/test/workload/expected.json index e9feb3edd..7c12fc3b8 100644 --- a/rules/rule-access-dashboard-wl-v1/test/workload/expected.json +++ b/rules/rule-access-dashboard-wl-v1/test/workload/expected.json @@ -1,7 +1,7 @@ [{ "alertMessage": "Deployment: test is associated with dashboard service account", - "failedPaths": ["spec.template.spec.serviceAccountName"], "deletePaths": ["spec.template.spec.serviceAccountName"], + "failedPaths": 
["spec.template.spec.serviceAccountName"], "fixPaths": [], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/rule-allow-privilege-escalation/test/cronjob/expected.json b/rules/rule-allow-privilege-escalation/test/cronjob/expected.json index ffc99d19f..5e3ef1b1e 100644 --- a/rules/rule-allow-privilege-escalation/test/cronjob/expected.json +++ b/rules/rule-allow-privilege-escalation/test/cronjob/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "container :mysql in CronJob: hello allow privilege escalation", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation", @@ -19,6 +20,7 @@ } }, { "alertMessage": "container :php in CronJob: hello allow privilege escalation", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", diff --git a/rules/rule-allow-privilege-escalation/test/pod/expected.json b/rules/rule-allow-privilege-escalation/test/pod/expected.json index a14de9c55..9ecc18440 100644 --- a/rules/rule-allow-privilege-escalation/test/pod/expected.json +++ b/rules/rule-allow-privilege-escalation/test/pod/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "container: test-container in pod: audit-pod allow privilege escalation", + "reviewPaths": ["spec.containers[0].securityContext.allowPrivilegeEscalation"], "failedPaths": ["spec.containers[0].securityContext.allowPrivilegeEscalation"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-allow-privilege-escalation/test/workloads/expected.json b/rules/rule-allow-privilege-escalation/test/workloads/expected.json index 286f502a6..d8ffd3d4f 100644 --- a/rules/rule-allow-privilege-escalation/test/workloads/expected.json +++ b/rules/rule-allow-privilege-escalation/test/workloads/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "container :mysql in Deployment: my-deployment allow privilege escalation", + "reviewPaths": ["spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation"], "failedPaths": ["spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation"], "fixPaths": [], "ruleStatus": "", @@ -19,6 +20,7 @@ } }, { "alertMessage": "container :php in Deployment: my-deployment allow privilege escalation", + "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", diff --git a/rules/rule-can-bind-escalate/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-can-bind-escalate/test/clusterrole-clusterrolebinding/expected.json index e93d38d4f..c26cc3d6a 100644 --- a/rules/rule-can-bind-escalate/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-can-bind-escalate/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: Group-dev can bind roles/clusterroles", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].verbs[0]", @@ -72,6 +79,13 @@ }, { "alertMessage": "Subject: Group-manager can bind roles/clusterroles", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + 
"relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].verbs[0]", diff --git a/rules/rule-can-bind-escalate/test/role-rolebinding/expected.json b/rules/rule-can-bind-escalate/test/role-rolebinding/expected.json index f849b83f3..eec1da859 100644 --- a/rules/rule-can-bind-escalate/test/role-rolebinding/expected.json +++ b/rules/rule-can-bind-escalate/test/role-rolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: User-jane can bind roles/clusterroles", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", @@ -68,6 +75,14 @@ }, { "alertMessage": "Subject: User-jane can escalate roles/clusterroles", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", diff --git a/rules/rule-can-create-pod/test/clusterrole-rolebinding/expected.json b/rules/rule-can-create-pod/test/clusterrole-rolebinding/expected.json index 20d237a77..dbc102d71 100644 --- a/rules/rule-can-create-pod/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-can-create-pod/test/clusterrole-rolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: User-jane can create pods", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", diff --git a/rules/rule-can-create-pod/test/role-rolebinding/expected.json b/rules/rule-can-create-pod/test/role-rolebinding/expected.json index 3d717ffc0..752922657 100644 --- a/rules/rule-can-create-pod/test/role-rolebinding/expected.json +++ b/rules/rule-can-create-pod/test/role-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can create pods", + "reviewPaths": ["relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].verbs[2]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].verbs[2]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-delete-k8s-events-v1/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-can-delete-k8s-events-v1/test/clusterrole-clusterrolebinding/expected.json index c6cbaabf3..3085ed5cb 100644 --- a/rules/rule-can-delete-k8s-events-v1/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-can-delete-k8s-events-v1/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: Group-manager can delete events", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[1]", + 
"relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].verbs[1]", @@ -71,6 +78,13 @@ }, { "alertMessage": "Subject: Group-dev can delete events", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].verbs[1]", diff --git a/rules/rule-can-delete-k8s-events-v1/test/clusterrole-rolebinding/expected.json b/rules/rule-can-delete-k8s-events-v1/test/clusterrole-rolebinding/expected.json index c82e769c5..4ecfa4223 100644 --- a/rules/rule-can-delete-k8s-events-v1/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-can-delete-k8s-events-v1/test/clusterrole-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can delete events", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-delete-k8s-events-v1/test/role-rolebinding/expected.json b/rules/rule-can-delete-k8s-events-v1/test/role-rolebinding/expected.json index c16e85e7b..66c747e9d 100644 --- a/rules/rule-can-delete-k8s-events-v1/test/role-rolebinding/expected.json +++ b/rules/rule-can-delete-k8s-events-v1/test/role-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can delete events", + "reviewPaths": ["relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-impersonate-users-groups-v1/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-can-impersonate-users-groups-v1/test/clusterrole-clusterrolebinding/expected.json index cc826001a..1cc80c214 100644 --- a/rules/rule-can-impersonate-users-groups-v1/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-can-impersonate-users-groups-v1/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: Group-dev can impersonate users", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[2]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].verbs[1]", @@ -71,6 +78,13 @@ }, { "alertMessage": "Subject: Group-manager can impersonate users", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[2]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + 
"relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].verbs[1]", diff --git a/rules/rule-can-impersonate-users-groups-v1/test/clusterrole-rolebinding/expected.json b/rules/rule-can-impersonate-users-groups-v1/test/clusterrole-rolebinding/expected.json index eb18bcea0..4a464eee5 100644 --- a/rules/rule-can-impersonate-users-groups-v1/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-can-impersonate-users-groups-v1/test/clusterrole-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can impersonate users", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-impersonate-users-groups-v1/test/role-rolebinding/expected.json b/rules/rule-can-impersonate-users-groups-v1/test/role-rolebinding/expected.json index 95ab976fd..7d0a3a772 100644 --- a/rules/rule-can-impersonate-users-groups-v1/test/role-rolebinding/expected.json +++ b/rules/rule-can-impersonate-users-groups-v1/test/role-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can impersonate users", + "reviewPaths": ["relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].verbs[2]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].verbs[2]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-list-get-secrets-v1/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-can-list-get-secrets-v1/test/clusterrole-clusterrolebinding/expected.json index 0eb2c4dbf..86858e9b5 100644 --- a/rules/rule-can-list-get-secrets-v1/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-can-list-get-secrets-v1/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: Group-dev can read secrets", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].verbs[0]", @@ -70,6 +77,13 @@ }, { "alertMessage": "Subject: Group-manager can read secrets", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].verbs[0]", diff --git a/rules/rule-can-list-get-secrets-v1/test/clusterrole-rolebinding/expected.json 
b/rules/rule-can-list-get-secrets-v1/test/clusterrole-rolebinding/expected.json index bbeb3b661..dc8619d90 100644 --- a/rules/rule-can-list-get-secrets-v1/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-can-list-get-secrets-v1/test/clusterrole-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can read secrets", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-list-get-secrets-v1/test/role-rolebinding/expected.json b/rules/rule-can-list-get-secrets-v1/test/role-rolebinding/expected.json index df37920cc..2954acbb2 100644 --- a/rules/rule-can-list-get-secrets-v1/test/role-rolebinding/expected.json +++ b/rules/rule-can-list-get-secrets-v1/test/role-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can read secrets", + "reviewPaths": ["relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-portforward-v1/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-can-portforward-v1/test/clusterrole-clusterrolebinding/expected.json index 61bc518aa..438b76b61 100644 --- a/rules/rule-can-portforward-v1/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-can-portforward-v1/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: Group-manager can do port forwarding", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", @@ -70,6 +77,13 @@ }, { "alertMessage": "Subject: Group-dev can do port forwarding", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", diff --git a/rules/rule-can-portforward-v1/test/clusterrole-rolebinding/expected.json b/rules/rule-can-portforward-v1/test/clusterrole-rolebinding/expected.json index 1efc7a717..c760347e5 100644 --- a/rules/rule-can-portforward-v1/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-can-portforward-v1/test/clusterrole-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can do port forwarding", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", 
"relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-portforward-v1/test/role-rolebinding/expected.json b/rules/rule-can-portforward-v1/test/role-rolebinding/expected.json index 2362cb9e8..1e4f304f1 100644 --- a/rules/rule-can-portforward-v1/test/role-rolebinding/expected.json +++ b/rules/rule-can-portforward-v1/test/role-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can do port forwarding", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[1]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[1]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-ssh-to-pod-v1/test/pod/expected.json b/rules/rule-can-ssh-to-pod-v1/test/pod/expected.json index 9d49820c9..1b3924152 100644 --- a/rules/rule-can-ssh-to-pod-v1/test/pod/expected.json +++ b/rules/rule-can-ssh-to-pod-v1/test/pod/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "pod default/audit-pod exposed by SSH services: {\"apiVersion\": \"v1\", \"kind\": \"Service\", \"metadata\": {\"name\": \"my-service\", \"namespace\": \"default\"}, \"spec\": {\"ports\": [{\"port\": 2222, \"protocol\": \"TCP\", \"targetPort\": 2222}], \"selector\": {\"app\": \"audit-pod\"}}}", + "reviewPaths": ["metadata.labels"], "failedPaths": ["metadata.labels"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-ssh-to-pod-v1/test/workloads/expected.json b/rules/rule-can-ssh-to-pod-v1/test/workloads/expected.json index 39973b461..539738965 100644 --- a/rules/rule-can-ssh-to-pod-v1/test/workloads/expected.json +++ b/rules/rule-can-ssh-to-pod-v1/test/workloads/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Deployment: test2 is exposed by SSH services: {\"apiVersion\": \"v1\", \"kind\": \"Service\", \"metadata\": {\"name\": \"my-service\", \"namespace\": \"default\"}, \"spec\": {\"ports\": [{\"port\": 2222, \"protocol\": \"TCP\", \"targetPort\": 2222}], \"selector\": {\"app\": \"audit-pod\"}}}", + "reviewPaths": ["spec.template.metadata.labels"], "failedPaths": ["spec.template.metadata.labels"], "fixPaths": null, "ruleStatus": "", diff --git a/rules/rule-can-update-configmap-v1/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-can-update-configmap-v1/test/clusterrole-clusterrolebinding/expected.json index a69892959..dbae45fa3 100644 --- a/rules/rule-can-update-configmap-v1/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-can-update-configmap-v1/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: Group-manager can modify 'coredns' configmap", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].verbs[0]", @@ -70,6 
+77,13 @@ }, { "alertMessage": "Subject: Group-dev can modify 'coredns' configmap", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].verbs[0]", diff --git a/rules/rule-can-update-configmap-v1/test/clusterrole-rolebinding/expected.json b/rules/rule-can-update-configmap-v1/test/clusterrole-rolebinding/expected.json index c1406ee05..a55b1c712 100644 --- a/rules/rule-can-update-configmap-v1/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-can-update-configmap-v1/test/clusterrole-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can modify 'coredns' configmap", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-can-update-configmap-v1/test/role-rolebinding/expected.json b/rules/rule-can-update-configmap-v1/test/role-rolebinding/expected.json index 099ff42b7..7ed1952e4 100644 --- a/rules/rule-can-update-configmap-v1/test/role-rolebinding/expected.json +++ b/rules/rule-can-update-configmap-v1/test/role-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can modify 'coredns' configmap", + "reviewPaths": ["relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[1]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[1]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-credentials-configmap/raw.rego b/rules/rule-credentials-configmap/raw.rego index 71d21beda..ae664b3ae 100644 --- a/rules/rule-credentials-configmap/raw.rego +++ b/rules/rule-credentials-configmap/raw.rego @@ -20,7 +20,7 @@ deny[msga] { "alertMessage": sprintf("this configmap has sensitive information: %v", [configmap.metadata.name]), "alertScore": 9, "deletePaths": [path], - "failedPaths": [path], + "failedPaths": [path], "fixPaths": [], "packagename": "armo_builtins", "alertObject": { diff --git a/rules/rule-credentials-configmap/test/test-base64/expected.json b/rules/rule-credentials-configmap/test/test-base64/expected.json index a4c43b332..e36d66b2a 100644 --- a/rules/rule-credentials-configmap/test/test-base64/expected.json +++ b/rules/rule-credentials-configmap/test/test-base64/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "this configmap has sensitive information: game-demo", + "deletePaths": ["data[pwd]"], "failedPaths": ["data[pwd]"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-credentials-configmap/test/test/expected.json b/rules/rule-credentials-configmap/test/test/expected.json index 2e7598582..3726c87e1 100644 --- a/rules/rule-credentials-configmap/test/test/expected.json +++ b/rules/rule-credentials-configmap/test/test/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": 
"this configmap has sensitive information: game-demo", + "deletePaths": ["data[aws_access_key_id]"], "failedPaths": ["data[aws_access_key_id]"], "fixPaths": [], "ruleStatus": "", @@ -16,6 +17,7 @@ } }, { "alertMessage": "this configmap has sensitive information: game-demo", + "deletePaths": ["data[pwd]"], "failedPaths": ["data[pwd]"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-credentials-in-env-var/test/cronjob/expected.json b/rules/rule-credentials-in-env-var/test/cronjob/expected.json index 67999043f..91409049e 100644 --- a/rules/rule-credentials-in-env-var/test/cronjob/expected.json +++ b/rules/rule-credentials-in-env-var/test/cronjob/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Cronjob: hello has sensitive information in environment variables", + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].env[0].name" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[0].env[0].name" ], diff --git a/rules/rule-credentials-in-env-var/test/deployment/expected.json b/rules/rule-credentials-in-env-var/test/deployment/expected.json index 5895545cc..ede9acff6 100644 --- a/rules/rule-credentials-in-env-var/test/deployment/expected.json +++ b/rules/rule-credentials-in-env-var/test/deployment/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Deployment: test2 has sensitive information in environment variables", + "deletePaths": [ + "spec.template.spec.containers[1].env[1].name" + ], "failedPaths": [ "spec.template.spec.containers[1].env[1].name" ], diff --git a/rules/rule-credentials-in-env-var/test/pod/expected.json b/rules/rule-credentials-in-env-var/test/pod/expected.json index 9324ba1a3..0f1ab8d87 100644 --- a/rules/rule-credentials-in-env-var/test/pod/expected.json +++ b/rules/rule-credentials-in-env-var/test/pod/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Pod: audit-pod has sensitive information in environment variables", + "deletePaths": [ + "spec.containers[0].env[1].name" + ], "failedPaths": [ "spec.containers[0].env[1].name" ], diff --git a/rules/rule-credentials-in-env-var/test/workloads/expected.json b/rules/rule-credentials-in-env-var/test/workloads/expected.json index 4be61b0ee..d109996e8 100644 --- a/rules/rule-credentials-in-env-var/test/workloads/expected.json +++ b/rules/rule-credentials-in-env-var/test/workloads/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Deployment: test2 has sensitive information in environment variables", + "deletePaths": [ + "spec.template.spec.containers[1].env[0].name" + ], "failedPaths": [ "spec.template.spec.containers[1].env[0].name" ], diff --git a/rules/rule-excessive-delete-rights-v1/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-excessive-delete-rights-v1/test/clusterrole-clusterrolebinding/expected.json index d34f1ed74..34af998ec 100644 --- a/rules/rule-excessive-delete-rights-v1/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-excessive-delete-rights-v1/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,14 @@ [ { "alertMessage": "Subject: Group-manager can delete important resources", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].resources[1]", @@ -72,6 +80,14 @@ }, { "alertMessage": "Subject: Group-dev can delete important 
resources", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].resources[1]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].resources[1]", diff --git a/rules/rule-excessive-delete-rights-v1/test/clusterrole-rolebinding/expected.json b/rules/rule-excessive-delete-rights-v1/test/clusterrole-rolebinding/expected.json index bb1a096ce..95648c446 100644 --- a/rules/rule-excessive-delete-rights-v1/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-excessive-delete-rights-v1/test/clusterrole-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can delete important resources", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-excessive-delete-rights-v1/test/role-rolebinding/expected.json b/rules/rule-excessive-delete-rights-v1/test/role-rolebinding/expected.json index d858fe9f2..01d102d31 100644 --- a/rules/rule-excessive-delete-rights-v1/test/role-rolebinding/expected.json +++ b/rules/rule-excessive-delete-rights-v1/test/role-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane can delete important resources", + "reviewPaths": ["relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].resources[3]", "relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[1]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[1]", "relatedObjects[1].rules[0].resources[3]", "relatedObjects[1].rules[0].resources[4]", "relatedObjects[1].rules[0].verbs[1]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json b/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json index cb77438f2..d8b342287 100644 --- a/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json +++ b/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "image 'quay.io/hi:latest' in container 'hello' comes from untrusted registry", + "reviewPaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].image" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[0].image" ], diff --git a/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json b/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json index 716d82434..9e2873142 100644 --- a/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json +++ b/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "image 'registry.hub.docker.com/php:7.0-apache' in container 'php' comes from untrusted registry", + "reviewPaths": [ + 
"spec.template.spec.containers[1].image" + ], "failedPaths": [ "spec.template.spec.containers[1].image" ], diff --git a/rules/rule-identify-old-k8s-registry/test/workloads/expected.json b/rules/rule-identify-old-k8s-registry/test/workloads/expected.json index bbe7fa4c3..af3e66e72 100644 --- a/rules/rule-identify-old-k8s-registry/test/workloads/expected.json +++ b/rules/rule-identify-old-k8s-registry/test/workloads/expected.json @@ -1 +1,26 @@ -[{"alertMessage":"image 'k8s.gcr.io/php:7.0-apache' in container 'php' comes from the deprecated k8s.gcr.io","failedPaths":["spec.template.spec.containers[0].image"],"fixPaths":[],"ruleStatus":"","packagename":"armo_builtins","alertScore":2,"alertObject":{"k8sApiObjects":[{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"my-deployment"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "image 'k8s.gcr.io/php:7.0-apache' in container 'php' comes from the deprecated k8s.gcr.io", + "reviewPaths": [ + "spec.template.spec.containers[0].image" + ], + "failedPaths": [ + "spec.template.spec.containers[0].image" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "my-deployment" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/rule-list-all-cluster-admins-v1/test/clusterrole-clusterrolebinding/expected.json b/rules/rule-list-all-cluster-admins-v1/test/clusterrole-clusterrolebinding/expected.json index 3ccc73bf7..079efef94 100644 --- a/rules/rule-list-all-cluster-admins-v1/test/clusterrole-clusterrolebinding/expected.json +++ b/rules/rule-list-all-cluster-admins-v1/test/clusterrole-clusterrolebinding/expected.json @@ -1,6 +1,13 @@ [ { "alertMessage": "Subject: Group-dev have high privileges, such as cluster-admin", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[1]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[1]", @@ -71,6 +78,13 @@ }, { "alertMessage": "Subject: Group-manager have high privileges, such as cluster-admin", + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[0]", + "relatedObjects[1].rules[0].verbs[1]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[1]", diff --git a/rules/rule-list-all-cluster-admins-v1/test/clusterrole-rolebinding/expected.json b/rules/rule-list-all-cluster-admins-v1/test/clusterrole-rolebinding/expected.json index 954f4664b..9eb2e6024 100644 --- a/rules/rule-list-all-cluster-admins-v1/test/clusterrole-rolebinding/expected.json +++ b/rules/rule-list-all-cluster-admins-v1/test/clusterrole-rolebinding/expected.json @@ -1,5 +1,6 @@ [{ "alertMessage": "Subject: User-jane have high privileges, such as cluster-admin", + "reviewPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], "failedPaths": ["relatedObjects[1].rules[0].resources[0]", "relatedObjects[1].rules[0].verbs[0]", "relatedObjects[1].rules[0].apiGroups[0]", "relatedObjects[0].subjects[0]", "relatedObjects[0].roleRef.name"], 
"fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-list-all-cluster-admins-v1/test/role-rolebinding/expected.json b/rules/rule-list-all-cluster-admins-v1/test/role-rolebinding/expected.json index 562493564..374300721 100644 --- a/rules/rule-list-all-cluster-admins-v1/test/role-rolebinding/expected.json +++ b/rules/rule-list-all-cluster-admins-v1/test/role-rolebinding/expected.json @@ -2,6 +2,14 @@ { "alertMessage": "Subject: User-jane have high privileges, such as cluster-admin", "fixPaths": [], + "reviewPaths": [ + "relatedObjects[1].rules[0].resources[2]", + "relatedObjects[1].rules[0].resources[4]", + "relatedObjects[1].rules[0].verbs[0]", + "relatedObjects[1].rules[0].apiGroups[0]", + "relatedObjects[0].subjects[0]", + "relatedObjects[0].roleRef.name" + ], "failedPaths": [ "relatedObjects[1].rules[0].resources[2]", "relatedObjects[1].rules[0].resources[4]", diff --git a/rules/rule-privileged-container/test/cronjob/expected.json b/rules/rule-privileged-container/test/cronjob/expected.json index 5d0c60296..a34a03537 100644 --- a/rules/rule-privileged-container/test/cronjob/expected.json +++ b/rules/rule-privileged-container/test/cronjob/expected.json @@ -2,6 +2,9 @@ { "alertMessage": "the following cronjobs are defined as privileged: hello", "fixPaths": [], + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].securityContext.capabilities.add[2]" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[0].securityContext.capabilities.add[2]" ], diff --git a/rules/rule-privileged-container/test/pod/expected.json b/rules/rule-privileged-container/test/pod/expected.json index 9228c3e9b..4406bd4ab 100644 --- a/rules/rule-privileged-container/test/pod/expected.json +++ b/rules/rule-privileged-container/test/pod/expected.json @@ -2,6 +2,9 @@ { "alertMessage": "the following pods are defined as privileged: audit-pod", "fixPaths": [], + "deletePaths": [ + "spec.containers[0].securityContext.capabilities.add[1]" + ], "failedPaths": [ "spec.containers[0].securityContext.capabilities.add[1]" ], diff --git a/rules/rule-privileged-container/test/workloads/expected.json b/rules/rule-privileged-container/test/workloads/expected.json index 13c4b9ff2..b320a882d 100644 --- a/rules/rule-privileged-container/test/workloads/expected.json +++ b/rules/rule-privileged-container/test/workloads/expected.json @@ -2,6 +2,9 @@ { "alertMessage": "Deployment: test2 is defined as privileged:", "fixPaths": [], + "deletePaths": [ + "spec.template.spec.containers[0].securityContext.privileged" + ], "failedPaths": [ "spec.template.spec.containers[0].securityContext.privileged" ], diff --git a/rules/rule-secrets-in-env-var/test/cronjob/expected.json b/rules/rule-secrets-in-env-var/test/cronjob/expected.json index 11314a433..da55814e8 100644 --- a/rules/rule-secrets-in-env-var/test/cronjob/expected.json +++ b/rules/rule-secrets-in-env-var/test/cronjob/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Cronjob: hello has secrets in environment variables", + "deletePaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].env[0].name" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[0].env[0].name" ], diff --git a/rules/rule-secrets-in-env-var/test/pod/expected.json b/rules/rule-secrets-in-env-var/test/pod/expected.json index f489bf575..2c22ead07 100644 --- a/rules/rule-secrets-in-env-var/test/pod/expected.json +++ b/rules/rule-secrets-in-env-var/test/pod/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Pod: audit-pod has secrets in environment variables", + 
"deletePaths": [ + "spec.containers[1].env[0].name" + ], "failedPaths": [ "spec.containers[1].env[0].name" ], diff --git a/rules/rule-secrets-in-env-var/test/workloads/expected.json b/rules/rule-secrets-in-env-var/test/workloads/expected.json index c430dc185..e1a71fa6f 100644 --- a/rules/rule-secrets-in-env-var/test/workloads/expected.json +++ b/rules/rule-secrets-in-env-var/test/workloads/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Deployment: test2 has secrets in environment variables", + "deletePaths": [ + "spec.template.spec.containers[1].env[1].name" + ], "failedPaths": [ "spec.template.spec.containers[1].env[1].name" ], diff --git a/rules/secret-etcd-encryption-cloud/test/gke/expected.json b/rules/secret-etcd-encryption-cloud/test/gke/expected.json index 5fa3ec1a8..c58eca499 100644 --- a/rules/secret-etcd-encryption-cloud/test/gke/expected.json +++ b/rules/secret-etcd-encryption-cloud/test/gke/expected.json @@ -3,6 +3,7 @@ "fixCommand": "gcloud container clusters update \u003ccluster_name\u003e --region=\u003ccompute_region\u003e --database-encryption-key=\u003ckey_project_id\u003e/locations/\u003clocation\u003e/keyRings/\u003cring_name\u003e/cryptoKeys/\u003ckey_name\u003e --project=\u003ccluster_project_id\u003e", "alertMessage": "etcd/secret encryption is not enabled", "fixPaths": [], + "reviewPaths": ["data.database_encryption.state"], "failedPaths": ["data.database_encryption.state"], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/service-in-default-namespace/test/service/expected.json b/rules/service-in-default-namespace/test/service/expected.json index 32879c5d2..3b631335a 100644 --- a/rules/service-in-default-namespace/test/service/expected.json +++ b/rules/service-in-default-namespace/test/service/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Service: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/serviceaccount-in-default-namespace/test/serviceaccount/expected.json b/rules/serviceaccount-in-default-namespace/test/serviceaccount/expected.json index 72f5bcaad..37a2ebe9a 100644 --- a/rules/serviceaccount-in-default-namespace/test/serviceaccount/expected.json +++ b/rules/serviceaccount-in-default-namespace/test/serviceaccount/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "ServiceAccount: kubescape is in the 'default' namespace", + "reviewPaths": [ + "metadata.namespace" + ], "failedPaths": [ "metadata.namespace" ], diff --git a/rules/serviceaccount-token-mount/test/pod-mount-and-rb-bind/expected.json b/rules/serviceaccount-token-mount/test/pod-mount-and-rb-bind/expected.json index 92a609206..5a67098c1 100644 --- a/rules/serviceaccount-token-mount/test/pod-mount-and-rb-bind/expected.json +++ b/rules/serviceaccount-token-mount/test/pod-mount-and-rb-bind/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Pod: test-pd in the following namespace: default mounts service account tokens by default", + "reviewPaths": [ + "spec.automountServiceAccountToken" + ], "failedPaths": [ "spec.automountServiceAccountToken" ], diff --git a/rules/set-fsgroup-value/test/cronjob/expected.json b/rules/set-fsgroup-value/test/cronjob/expected.json index 796182aae..b6e50f572 100644 --- a/rules/set-fsgroup-value/test/cronjob/expected.json +++ b/rules/set-fsgroup-value/test/cronjob/expected.json @@ -3,6 +3,7 @@ "alertMessage": "CronJob: hello1 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": 
["spec.jobTemplate.spec.template.spec.securityContext"], "failedPaths": ["spec.jobTemplate.spec.template.spec.securityContext"], "fixPaths": [], "ruleStatus": "", @@ -22,6 +23,7 @@ "alertMessage": "CronJob: hello2 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": [{"path":"spec.jobTemplate.spec.template.spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", diff --git a/rules/set-fsgroup-value/test/pod/expected.json b/rules/set-fsgroup-value/test/pod/expected.json index b3efe8863..2c2989d96 100644 --- a/rules/set-fsgroup-value/test/pod/expected.json +++ b/rules/set-fsgroup-value/test/pod/expected.json @@ -3,6 +3,7 @@ "alertMessage": "Pod: nginx1 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["spec.securityContext"], "failedPaths": ["spec.securityContext"], "fixPaths": [], "ruleStatus": "", @@ -22,6 +23,7 @@ "alertMessage": "Pod: nginx2 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": [{"path":"spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", diff --git a/rules/set-fsgroup-value/test/workload/expected.json b/rules/set-fsgroup-value/test/workload/expected.json index e9010839f..f20bafa55 100644 --- a/rules/set-fsgroup-value/test/workload/expected.json +++ b/rules/set-fsgroup-value/test/workload/expected.json @@ -3,6 +3,7 @@ "alertMessage": "Workload: my-deployment1 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["spec.template.spec.securityContext"], "failedPaths": ["spec.template.spec.securityContext"], "fixPaths": [], "ruleStatus": "", @@ -25,6 +26,7 @@ "alertMessage": "Workload: my-deployment2 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": [], "failedPaths": [], "fixPaths": [{"path":"spec.template.spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", diff --git a/rules/set-procmount-default/test/cronjob/expected.json b/rules/set-procmount-default/test/cronjob/expected.json index bb6addfc4..26c2a3001 100644 --- a/rules/set-procmount-default/test/cronjob/expected.json +++ b/rules/set-procmount-default/test/cronjob/expected.json @@ -3,6 +3,7 @@ "alertMessage": "CronJob: hello has containers that do not set 'securityContext.procMount' to 'Default'", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["containers[0].securityContext.procMount"], "failedPaths": ["containers[0].securityContext.procMount"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/set-procmount-default/test/pod/expected.json b/rules/set-procmount-default/test/pod/expected.json index 2c8aeaf11..4c75f435c 100644 --- a/rules/set-procmount-default/test/pod/expected.json +++ b/rules/set-procmount-default/test/pod/expected.json @@ -3,6 +3,7 @@ "alertMessage": "Pod: nginx has containers that do not set 'securityContext.procMount' to 'Default'", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["containers[1].securityContext.procMount"], "failedPaths": ["containers[1].securityContext.procMount"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/set-procmount-default/test/workload/expected.json b/rules/set-procmount-default/test/workload/expected.json index 16ab5f6a3..bb95bf961 
100644 --- a/rules/set-procmount-default/test/workload/expected.json +++ b/rules/set-procmount-default/test/workload/expected.json @@ -3,6 +3,7 @@ "alertMessage": "Workload: my-deployment has containers that do not set 'securityContext.procMount' to 'Default'", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["containers[1].securityContext.procMount"], "failedPaths": ["containers[1].securityContext.procMount"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/set-seccomp-profile-RuntimeDefault/test/cronjob/expected.json b/rules/set-seccomp-profile-RuntimeDefault/test/cronjob/expected.json index 7a345e6e6..34a2f6f92 100644 --- a/rules/set-seccomp-profile-RuntimeDefault/test/cronjob/expected.json +++ b/rules/set-seccomp-profile-RuntimeDefault/test/cronjob/expected.json @@ -1,6 +1,7 @@ [ { "alertMessage": "Cronjob: hello does not define seccompProfile as RuntimeDefault", + "reviewPaths": [], "failedPaths": [], "fixPaths": [ { @@ -25,6 +26,9 @@ }, { "alertMessage": "Cronjob: hello does not define seccompProfile as RuntimeDefault", + "reviewPaths": [ + "spec.jobTemplate.spec.template.spec.containers[1].securityContext.seccompProfile.type" + ], "failedPaths": [ "spec.jobTemplate.spec.template.spec.containers[1].securityContext.seccompProfile.type" ], diff --git a/rules/set-seccomp-profile-RuntimeDefault/test/pod/expected.json b/rules/set-seccomp-profile-RuntimeDefault/test/pod/expected.json index f4f28c157..daefae833 100644 --- a/rules/set-seccomp-profile-RuntimeDefault/test/pod/expected.json +++ b/rules/set-seccomp-profile-RuntimeDefault/test/pod/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Pod: audit-pod does not define seccompProfile as RuntimeDefault", + "reviewPaths": [ + "spec.containers[1].securityContext.seccompProfile.type" + ], "failedPaths": [ "spec.containers[1].securityContext.seccompProfile.type" ], @@ -25,6 +28,9 @@ }, { "alertMessage": "Pod: audit-pod does not define seccompProfile as RuntimeDefault", + "reviewPaths": [ + "spec.securityContext.seccompProfile.type" + ], "failedPaths": [ "spec.securityContext.seccompProfile.type" ], diff --git a/rules/set-seccomp-profile-RuntimeDefault/test/workloads/expected.json b/rules/set-seccomp-profile-RuntimeDefault/test/workloads/expected.json index 33703e3b9..df562dba8 100644 --- a/rules/set-seccomp-profile-RuntimeDefault/test/workloads/expected.json +++ b/rules/set-seccomp-profile-RuntimeDefault/test/workloads/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Workload: my-deployment does not define seccompProfile as RuntimeDefault", + "reviewPaths": [ + "spec.template.spec.containers[1].securityContext.seccompProfile.type" + ], "failedPaths": [ "spec.template.spec.containers[1].securityContext.seccompProfile.type" ], diff --git a/rules/set-supplementalgroups-values/test/cronjob/expected.json b/rules/set-supplementalgroups-values/test/cronjob/expected.json index b7f838601..c828ba8cf 100644 --- a/rules/set-supplementalgroups-values/test/cronjob/expected.json +++ b/rules/set-supplementalgroups-values/test/cronjob/expected.json @@ -3,6 +3,7 @@ "alertMessage": "CronJob: hello does not set 'securityContext.supplementalGroups'", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["spec.jobTemplate.spec.template.spec.securityContext"], "failedPaths": ["spec.jobTemplate.spec.template.spec.securityContext"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/set-supplementalgroups-values/test/pod/expected.json b/rules/set-supplementalgroups-values/test/pod/expected.json index aeef057c1..f6f7dcf0a 100644 
--- a/rules/set-supplementalgroups-values/test/pod/expected.json +++ b/rules/set-supplementalgroups-values/test/pod/expected.json @@ -3,6 +3,7 @@ "alertMessage": "Pod: nginx does not set 'securityContext.supplementalGroups'", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["spec.securityContext"], "failedPaths": ["spec.securityContext"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/set-supplementalgroups-values/test/workload/expected.json b/rules/set-supplementalgroups-values/test/workload/expected.json index 7aa5123fa..e093db121 100644 --- a/rules/set-supplementalgroups-values/test/workload/expected.json +++ b/rules/set-supplementalgroups-values/test/workload/expected.json @@ -3,6 +3,7 @@ "alertMessage": "Workload: my-deployment does not set 'securityContext.supplementalGroups'", "packagename": "armo_builtins", "alertScore": 7, + "reviewPaths": ["spec.template.spec.securityContext"], "failedPaths": ["spec.template.spec.securityContext"], "fixPaths": [], "ruleStatus": "", diff --git a/rules/sudo-in-container-entrypoint/test/pod/expected.json b/rules/sudo-in-container-entrypoint/test/pod/expected.json index 9caa17df9..258d4ca67 100644 --- a/rules/sudo-in-container-entrypoint/test/pod/expected.json +++ b/rules/sudo-in-container-entrypoint/test/pod/expected.json @@ -2,6 +2,9 @@ { "alertMessage": "container: command-demo-container in pod: command-demo have sudo in entrypoint", "fixPaths": [], + "reviewPaths": [ + "spec.containers[0].command[0]" + ], "failedPaths": [ "spec.containers[0].command[0]" ], diff --git a/rules/sudo-in-container-entrypoint/test/workloads/expected.json b/rules/sudo-in-container-entrypoint/test/workloads/expected.json index e8a858f26..9dd3bb964 100644 --- a/rules/sudo-in-container-entrypoint/test/workloads/expected.json +++ b/rules/sudo-in-container-entrypoint/test/workloads/expected.json @@ -2,6 +2,9 @@ { "alertMessage": "container: test-container2 in Deployment: test2 have sudo in entrypoint", "fixPaths": [], + "reviewPaths": [ + "spec.template.spec.containers[1].command[0]" + ], "failedPaths": [ "spec.template.spec.containers[1].command[0]" ], diff --git a/rules/workload-mounted-configmap/test/failed_pod/expected.json b/rules/workload-mounted-configmap/test/failed_pod/expected.json index 1d1c00abf..c4f9a6dc1 100644 --- a/rules/workload-mounted-configmap/test/failed_pod/expected.json +++ b/rules/workload-mounted-configmap/test/failed_pod/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Pod: mypod has mounted configMap", + "deletePaths": [ + "spec.containers[0].volumeMounts[1]" + ], "failedPaths": [ "spec.containers[0].volumeMounts[1]" ], diff --git a/rules/workload-mounted-pvc/test/failed_pod_mounted/expected.json b/rules/workload-mounted-pvc/test/failed_pod_mounted/expected.json index 9c86b72e6..6a558c3e4 100644 --- a/rules/workload-mounted-pvc/test/failed_pod_mounted/expected.json +++ b/rules/workload-mounted-pvc/test/failed_pod_mounted/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Pod: mypod has mounted PVC", + "deletePaths": [ + "spec.containers[0].volumeMounts[0]" + ], "failedPaths": [ "spec.containers[0].volumeMounts[0]" ], diff --git a/rules/workload-mounted-secrets/test/failed/expected.json b/rules/workload-mounted-secrets/test/failed/expected.json index aa3edc9cf..9f3d77021 100644 --- a/rules/workload-mounted-secrets/test/failed/expected.json +++ b/rules/workload-mounted-secrets/test/failed/expected.json @@ -1,6 +1,9 @@ [ { "alertMessage": "Pod: mypod has mounted secret", + "deletePaths": [ + 
"spec.containers[0].volumeMounts[0]" + ], "failedPaths": [ "spec.containers[0].volumeMounts[0]" ], diff --git a/testrunner/rego_test.go b/testrunner/rego_test.go index d6e682c06..4add17838 100644 --- a/testrunner/rego_test.go +++ b/testrunner/rego_test.go @@ -52,7 +52,7 @@ func TestSingleRule(t *testing.T) { // To print the output // Change the testDir variable to the directory of the rego you want to test func TestSingleRego(t *testing.T) { - testDir := "ensure-endpointprivateaccess-is-enabled" + testDir := "ensure-that-the-scheduler-profiling-argument-is-set-to-false" dir := fmt.Sprintf("%v/input", testSingleRegoDirectory) mocks, err := os.Open(dir) if err != nil { From b948f9a794ccca86f38b0ed0b46ad536f51458a3 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 21 Nov 2023 16:18:32 +0200 Subject: [PATCH 036/195] Update pr-tests.yaml Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- .github/workflows/pr-tests.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index b507ea281..ff440c71c 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -17,15 +17,15 @@ env: REGO_ARTIFACT_PATH: releaseDev jobs: - # testing link checks - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - - name: Check links - uses: gaurav-nelson/github-action-markdown-link-check@5c5dfc0ac2e225883c0e5f03a85311ec2830d368 - with: - use-verbose-mode: 'yes' + # # testing link checks + # markdown-link-check: + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + # - name: Check links + # uses: gaurav-nelson/github-action-markdown-link-check@5c5dfc0ac2e225883c0e5f03a85311ec2830d368 + # with: + # use-verbose-mode: 'yes' # main job of testing and building the env. test_pr_checks: From f48fccd34f1d3a9782d353ccae7c2c9deca5f6f9 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Thu, 23 Nov 2023 13:34:42 +0200 Subject: [PATCH 037/195] remove from README.md Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index b74488e7a..6a8a2b5b2 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,6 @@ Example of a framework: "name": "DevOpsBest", "description": "This framework is recommended for use by devops.", "attributes": { - "armoBuiltin": true }, "scanningScope": { "matches": [ @@ -53,7 +52,6 @@ Example of a framework: ] } ``` -* Attribute `"armoBuiltin": true` - mandatory for armo rules. Only ARMO team members are authorized to create builtin objects. * controlNames - List of controls to run, must be exact name. Use copy-paste to be sure. * `scanningScope` - this framework will run just if kubescape scan process match to the scope in the list.(for example the framework above will run if the running kubescape scan is for scanning cluster or file) - list of allowed scanning scope ``` [["cluster", "file"], ["cluster"], ["cloud"], ["GKE"], ["EKS"], ["AKS"]] ```. `cloud` meaning - will run just on managed cluster @@ -67,7 +65,6 @@ Example of a control: { "name": "Pods in default namespace", "attributes": { - "armoBuiltin": true }, "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. 
This control identifies all the pods running in the default namespace.", "remediation": "Create necessary namespaces and move all the pods from default namespace there.", @@ -93,7 +90,6 @@ Example of a control: } } ``` -* Attribute `"armoBuiltin": true` - mandatory for armo rules. Only ARMO team members are authorized to create builtin objects. * `rulesNames` - List of rules to run, must be exact name. Use copy-paste to be sure. * `scanningScope` - this control will run just if kubescape scan process match to the scope in the list.(for example the control above will run if the running kubescape scan is for scanning cluster or file) - list of allowed scanning scope ``` [["cluster", "file"], ["cluster"], ["cloud"], ["GKE"], ["EKS"], ["AKS"]] ```. `cloud` meaning - will run just on managed cluster * `category` - The category the control belongs to. Some controls may also define a `subCategory`. The available categories/sub categories are listed under the `mapCategoryNameToID.json` file, mapped to their respective IDs @@ -115,7 +111,6 @@ Example of rule.metadata.json: { "name": "resources-cpu-limit-and-request", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ @@ -145,7 +140,6 @@ Example of rule.metadata.json: "ruleQuery": "armo_builtins" } ``` -* Attribute `"armoBuiltin": true` - mandatory for armo rules. Only ARMO team members are authorized to create builtin objects. * See [rule go struct](https://github.com/kubescape/opa-utils/blob/master/reporthandling/datastructures.go#L37) for further explanations of rule fields From 16c0ce277d0bca651843ec7c9bfa383ef53fd252 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Thu, 23 Nov 2023 14:53:38 +0200 Subject: [PATCH 038/195] removing armoBuiltin from all controls --- controls/C-0001-forbiddencontainerregistries.json | 1 - controls/C-0002-execintocontainer.json | 1 - controls/C-0004-resourcesmemorylimitandrequest.json | 1 - controls/C-0005-apiserverinsecureportisenabled.json | 1 - controls/C-0007-datadestruction.json | 1 - controls/C-0009-resourcelimits.json | 1 - .../C-0012-applicationscredentialsinconfigurationfiles.json | 1 - controls/C-0013-nonrootcontainers.json | 1 - controls/C-0014-accesskubernetesdashboard.json | 1 - controls/C-0015-listkubernetessecrets.json | 1 - controls/C-0016-allowprivilegeescalation.json | 1 - controls/C-0017-immutablecontainerfilesystem.json | 1 - controls/C-0018-configuredreadinessprobe.json | 1 - controls/C-0020-mountserviceprincipal.json | 1 - controls/C-0021-exposedsensitiveinterfaces.json | 1 - controls/C-0026-kubernetescronjob.json | 1 - controls/C-0030-ingressandegressblocked.json | 1 - controls/C-0031-deletekubernetesevents.json | 1 - controls/C-0034-automaticmappingofserviceaccount.json | 1 - controls/C-0035-clusteradminbinding.json | 1 - controls/C-0036-maliciousadmissioncontrollervalidating.json | 1 - controls/C-0037-corednspoisoning.json | 1 - controls/C-0038-hostpidipcprivileges.json | 1 - controls/C-0039-maliciousadmissioncontrollermutating.json | 1 - controls/C-0041-hostnetworkaccess.json | 1 - controls/C-0042-sshserverrunninginsidecontainer.json | 1 - controls/C-0044-containerhostport.json | 1 - controls/C-0045-writablehostpathmount.json | 1 - controls/C-0046-insecurecapabilities.json | 1 - controls/C-0048-hostpathmount.json | 1 - controls/C-0049-networkmapping.json | 1 - controls/C-0050-resourcescpulimitandrequest.json | 1 - controls/C-0052-instancemetadataapi.json | 1 - controls/C-0053-accesscontainerserviceaccount.json | 1 - controls/C-0054-clusterinternalnetworking.json 
| 1 - controls/C-0055-linuxhardening.json | 1 - controls/C-0056-configuredlivenessprobe.json | 1 - controls/C-0057-privilegedcontainer.json | 1 - ...e202125741usingsymlinkforarbitraryhostfilesystemaccess.json | 1 - ...cve202125742nginxingresssnippetannotationvulnerability.json | 1 - controls/C-0061-podsindefaultnamespace.json | 1 - controls/C-0062-sudoincontainerentrypoint.json | 1 - controls/C-0063-portforwardingprivileges.json | 1 - controls/C-0065-noimpersonation.json | 1 - controls/C-0066-secretetcdencryptionenabled.json | 1 - controls/C-0067-auditlogsenabled.json | 1 - controls/C-0068-pspenabled.json | 1 - controls/C-0069-disableanonymousaccesstokubeletservice.json | 1 - controls/C-0070-enforcekubeletclienttlsauthentication.json | 1 - controls/C-0073-nakedpods.json | 1 - controls/C-0074-containersmountingdockersocket.json | 1 - controls/C-0075-imagepullpolicyonlatesttag.json | 1 - controls/C-0076-labelusageforresources.json | 1 - controls/C-0077-k8scommonlabelsusage.json | 1 - controls/C-0078-imagesfromallowedregistry.json | 1 - controls/C-0079-cve20220185linuxkernelcontainerescape.json | 1 - controls/C-0081-cve202224348argocddirtraversal.json | 1 - ...adswithcriticalvulnerabilitiesexposedtoexternaltraffic.json | 1 - ...orkloadswithrcevulnerabilitiesexposedtoexternaltraffic.json | 1 - .../C-0085-workloadswithexcessiveamountofvulnerabilities.json | 1 - controls/C-0086-cve20220492cgroupscontainerescape.json | 1 - controls/C-0087-cve202223648containerdfsescape.json | 1 - controls/C-0088-rbacenabled.json | 1 - controls/C-0089-cve20223172aggregatedapiserverredirect.json | 1 - controls/C-0090-cve202239328grafanaauthbypass.json | 1 - controls/C-0091-cve202247633kyvernosignaturebypass.json | 1 - ...ecificationfilepermissionsaresetto600ormorerestrictive.json | 1 - ...eapiserverpodspecificationfileownershipissettorootroot.json | 1 - ...ecificationfilepermissionsaresetto600ormorerestrictive.json | 1 - ...lermanagerpodspecificationfileownershipissettorootroot.json | 1 - ...ecificationfilepermissionsaresetto600ormorerestrictive.json | 1 - ...eschedulerpodspecificationfileownershipissettorootroot.json | 1 - ...ecificationfilepermissionsaresetto600ormorerestrictive.json | 1 - ...hattheetcdpodspecificationfileownershipissettorootroot.json | 1 - ...rkinterfacefilepermissionsaresetto600ormorerestrictive.json | 1 - ...econtainernetworkinterfacefileownershipissettorootroot.json | 1 - ...cddatadirectorypermissionsaresetto700ormorerestrictive.json | 1 - ...ensurethattheetcddatadirectoryownershipissettoetcdetcd.json | 1 - ...-0104-ensurethattheadminconffilepermissionsaresetto600.json | 1 - ...105-ensurethattheadminconffileownershipissettorootroot.json | 1 - ...hedulerconffilepermissionsaresetto600ormorerestrictive.json | 1 - ...ensurethattheschedulerconffileownershipissettorootroot.json | 1 - ...managerconffilepermissionsaresetto600ormorerestrictive.json | 1 - ...atthecontrollermanagerconffileownershipissettorootroot.json | 1 - ...ekubernetespkidirectoryandfileownershipissettorootroot.json | 1 - ...certificatefilepermissionsaresetto600ormorerestrictive.json | 1 - ...nsurethatthekubernetespkikeyfilepermissionsaresetto600.json | 1 - ...nsurethattheapiserveranonymousauthargumentissettofalse.json | 1 - ...4-ensurethattheapiservertokenauthfileparameterisnotset.json | 1 - ...5-ensurethattheapiserverdenyserviceexternalipsisnotset.json | 1 - ...ificateandkubeletclientkeyargumentsaresetasappropriate.json | 1 - ...rkubeletcertificateauthorityargumentissetasappropriate.json | 1 - 
...piserverauthorizationmodeargumentisnotsettoalwaysallow.json | 1 - ...ethattheapiserverauthorizationmodeargumentincludesnode.json | 1 - ...ethattheapiserverauthorizationmodeargumentincludesrbac.json | 1 - ...ensurethattheadmissioncontrolplugineventratelimitisset.json | 1 - ...ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json | 1 - ...surethattheadmissioncontrolpluginalwayspullimagesisset.json | 1 - ...insecuritycontextdenyissetifpodsecuritypolicyisnotused.json | 1 - ...ensurethattheadmissioncontrolpluginserviceaccountisset.json | 1 - ...rethattheadmissioncontrolpluginnamespacelifecycleisset.json | 1 - ...nsurethattheadmissioncontrolpluginnoderestrictionisset.json | 1 - ...28-ensurethattheapiserversecureportargumentisnotsetto0.json | 1 - ...29-ensurethattheapiserverprofilingargumentissettofalse.json | 1 - ...C-0130-ensurethattheapiserverauditlogpathargumentisset.json | 1 - ...piserverauditlogmaxageargumentissetto30orasappropriate.json | 1 - ...erverauditlogmaxbackupargumentissetto10orasappropriate.json | 1 - ...serverauditlogmaxsizeargumentissetto100orasappropriate.json | 1 - ...attheapiserverrequesttimeoutargumentissetasappropriate.json | 1 - ...hattheapiserverserviceaccountlookupargumentissettotrue.json | 1 - ...iserverserviceaccountkeyfileargumentissetasappropriate.json | 1 - ...etcdcertfileandetcdkeyfileargumentsaresetasappropriate.json | 1 - ...rtfileandtlsprivatekeyfileargumentsaresetasappropriate.json | 1 - ...thattheapiserverclientcafileargumentissetasappropriate.json | 1 - ...rethattheapiserveretcdcafileargumentissetasappropriate.json | 1 - ...rverencryptionproviderconfigargumentissetasappropriate.json | 1 - ...nsurethatencryptionprovidersareappropriatelyconfigured.json | 1 - ...attheapiserveronlymakesuseofstrongcryptographicciphers.json | 1 - ...agerterminatedpodgcthresholdargumentissetasappropriate.json | 1 - ...ethatthecontrollermanagerprofilingargumentissettofalse.json | 1 - ...manageruseserviceaccountcredentialsargumentissettotrue.json | 1 - ...serviceaccountprivatekeyfileargumentissetasappropriate.json | 1 - ...econtrollermanagerrootcafileargumentissetasappropriate.json | 1 - ...nagerrotatekubeletservercertificateargumentissettotrue.json | 1 - ...atthecontrollermanagerbindaddressargumentissetto127001.json | 1 - ...51-ensurethattheschedulerprofilingargumentissettofalse.json | 1 - ...ensurethattheschedulerbindaddressargumentissetto127001.json | 1 - ...ethatthecertfileandkeyfileargumentsaresetasappropriate.json | 1 - .../C-0154-ensurethattheclientcertauthargumentissettotrue.json | 1 - .../C-0155-ensurethattheautotlsargumentisnotsettotrue.json | 1 - ...peercertfileandpeerkeyfileargumentsaresetasappropriate.json | 1 - ...157-ensurethatthepeerclientcertauthargumentissettotrue.json | 1 - .../C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json | 1 - ...159-ensurethatauniquecertificateauthorityisusedforetcd.json | 1 - controls/C-0160-ensurethataminimalauditpolicyiscreated.json | 1 - ...0161-ensurethattheauditpolicycoverskeysecurityconcerns.json | 1 - ...eletservicefilepermissionsaresetto600ormorerestrictive.json | 1 - ...nsurethatthekubeletservicefileownershipissettorootroot.json | 1 - ...ileexistsensurepermissionsaresetto600ormorerestrictive.json | 1 - ...roxykubeconfigfileexistsensureownershipissettorootroot.json | 1 - ...kubeletconffilepermissionsaresetto600ormorerestrictive.json | 1 - ...atthekubeconfigkubeletconffileownershipissettorootroot.json | 1 - ...authoritiesfilepermissionsaresetto600ormorerestrictive.json | 1 - 
...ientcertificateauthoritiesfileownershipissettorootroot.json | 1 - ...sbeingusedvalidatepermissionssetto600ormorerestrictive.json | 1 - ...ionfileisbeingusedvalidatefileownershipissettorootroot.json | 1 - .../C-0172-ensurethattheanonymousauthargumentissettofalse.json | 1 - ...ethattheauthorizationmodeargumentisnotsettoalwaysallow.json | 1 - ...74-ensurethattheclientcafileargumentissetasappropriate.json | 1 - controls/C-0175-verifythatthereadonlyportargumentissetto0.json | 1 - ...atthestreamingconnectionidletimeoutargumentisnotsetto0.json | 1 - ...-ensurethattheprotectkerneldefaultsargumentissettotrue.json | 1 - ...ensurethatthemakeiptablesutilchainsargumentissettotrue.json | 1 - .../C-0179-ensurethatthehostnameoverrideargumentisnotset.json | 1 - ...entissetto0oralevelwhichensuresappropriateeventcapture.json | 1 - ...rtfileandtlsprivatekeyfileargumentsaresetasappropriate.json | 1 - ...ensurethattherotatecertificatesargumentisnotsettofalse.json | 1 - ...attherotatekubeletservercertificateargumentissettotrue.json | 1 - ...thatthekubeletonlymakesuseofstrongcryptographicciphers.json | 1 - ...5-ensurethattheclusteradminroleisonlyusedwhererequired.json | 1 - controls/C-0186-minimizeaccesstosecrets.json | 1 - controls/C-0187-minimizewildcarduseinrolesandclusterroles.json | 1 - controls/C-0188-minimizeaccesstocreatepods.json | 1 - ...189-ensurethatdefaultserviceaccountsarenotactivelyused.json | 1 - ...rethatserviceaccounttokensareonlymountedwherenecessary.json | 1 - ...mpersonateandescalatepermissionsinthekubernetescluster.json | 1 - ...lusterhasatleastoneactivepolicycontrolmechanisminplace.json | 1 - .../C-0193-minimizetheadmissionofprivilegedcontainers.json | 1 - ...ionofcontainerswishingtosharethehostprocessidnamespace.json | 1 - ...admissionofcontainerswishingtosharethehostipcnamespace.json | 1 - ...ssionofcontainerswishingtosharethehostnetworknamespace.json | 1 - ...zetheadmissionofcontainerswithallowprivilegeescalation.json | 1 - controls/C-0198-minimizetheadmissionofrootcontainers.json | 1 - ...nimizetheadmissionofcontainerswiththenet_rawcapability.json | 1 - ...-minimizetheadmissionofcontainerswithaddedcapabilities.json | 1 - ...nimizetheadmissionofcontainerswithcapabilitiesassigned.json | 1 - ...202-minimizetheadmissionofwindowshostprocesscontainers.json | 1 - controls/C-0203-minimizetheadmissionofhostpathvolumes.json | 1 - ...0204-minimizetheadmissionofcontainerswhichusehostports.json | 1 - .../C-0205-ensurethatthecniinusesupportsnetworkpolicies.json | 1 - ...0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json | 1 - ...erusingsecretsasfilesoversecretsasenvironmentvariables.json | 1 - controls/C-0208-considerexternalsecretstorage.json | 1 - ...dministrativeboundariesbetweenresourcesusingnamespaces.json | 1 - ...seccompprofileissettodockerdefaultinyourpoddefinitions.json | 1 - .../C-0211-applysecuritycontexttoyourpodsandcontainers.json | 1 - controls/C-0212-thedefaultnamespaceshouldnotbeused.json | 1 - .../C-0213-minimizetheadmissionofprivilegedcontainers.json | 1 - ...ionofcontainerswishingtosharethehostprocessidnamespace.json | 1 - ...admissionofcontainerswishingtosharethehostipcnamespace.json | 1 - ...ssionofcontainerswishingtosharethehostnetworknamespace.json | 1 - ...zetheadmissionofcontainerswithallowprivilegeescalation.json | 1 - controls/C-0218-minimizetheadmissionofrootcontainers.json | 1 - ...-minimizetheadmissionofcontainerswithaddedcapabilities.json | 1 - ...nimizetheadmissionofcontainerswithcapabilitiesassigned.json | 1 - 
...anningusingamazonecrimagescanningorathirdpartyprovider.json | 1 - controls/C-0222-minimizeuseraccesstoamazonecr.json | 1 - .../C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json | 1 - controls/C-0225-preferusingdedicatedeksserviceaccounts.json | 1 - .../C-0226-preferusingacontaineroptimizedoswhenpossible.json | 1 - controls/C-0227-restrictaccesstothecontrolplaneendpoint.json | 1 - ...eatedwithprivateendpointenabledandpublicaccessdisabled.json | 1 - controls/C-0229-ensureclustersarecreatedwithprivatenodes.json | 1 - ...C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json | 1 - ...-encrypttraffictohttpsloadbalancerswithtlscertificates.json | 1 - ...siamauthenticatorforkubernetesorupgradetoawscliv116156.json | 1 - .../C-0233-considerfargateforrunninguntrustedworkloads.json | 1 - controls/C-0234-considerexternalsecretstorage.json | 1 - ...nfigurationfilehaspermissionssetto644ormorerestrictive.json | 1 - controls/C-0236-verifyimagesignature.json | 3 +-- controls/C-0237-hasimagesignature.json | 1 - ...ekubeconfigfilepermissionsaresetto644ormorerestrictive.json | 1 - controls/C-0239-preferusingdedicatedaksserviceaccounts.json | 1 - ...C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json | 1 - controls/C-0241-useazurerbacforkubernetesauthorization.json | 1 - controls/C-0242-hostilemultitenantworkloads.json | 1 - ...ngusingazuredefenderimagescanningorathirdpartyprovider.json | 1 - controls/C-0244-ensurekubernetessecretsareencrypted.json | 1 - ...-encrypttraffictohttpsloadbalancerswithtlscertificates.json | 1 - controls/C-0246-avoiduseofsystemmastersgroup.json | 1 - controls/C-0247-restrictaccesstothecontrolplaneendpoint.json | 1 - controls/C-0248-ensureclustersarecreatedwithprivatenodes.json | 1 - controls/C-0249-restrictuntrustedworkloads.json | 1 - ...izeclusteraccesstoreadonlyforazurecontainerregistryacr.json | 1 - .../C-0251-minimizeuseraccesstoazurecontainerregistryacr.json | 1 - ...eatedwithprivateendpointenabledandpublicaccessdisabled.json | 1 - controls/C-0253-deprecated-k8s-registry.json | 1 - controls/C-0254-enableauditlogs.json | 1 - controls/C-0255-workloadwithsecretaccess.json | 1 - controls/C-0256-exposuretointernet.json | 1 - controls/C-0257-pvcaccess.json | 1 - controls/C-0258-configmapaccess.json | 1 - controls/C-0259-workloadwithcredentialaccess.json | 1 - controls/C-0260-missingnetworkpolicy.json | 1 - controls/C-0261-satokenmounted.json | 1 - controls/C-0262-anonymousaccessisenabled.json | 1 - default-config-inputs.json | 1 - frameworks/__YAMLscan.json | 1 - frameworks/allcontrols.json | 1 - frameworks/armobest.json | 1 - frameworks/cis-aks-t1.2.0.json | 3 +-- frameworks/cis-eks-t1.2.0.json | 3 +-- frameworks/cis-v1.23-t1.0.1.json | 3 +-- frameworks/clusterscan.json | 1 - frameworks/devopsbest.json | 1 - frameworks/mitre.json | 1 - frameworks/nsaframework.json | 1 - frameworks/security.json | 1 - frameworks/workloadscan.json | 1 - gitregostore/gitstoreutils_test.go | 2 +- rules/CVE-2021-25741/rule.metadata.json | 1 - rules/CVE-2021-25742/rule.metadata.json | 1 - rules/CVE-2022-0185/rule.metadata.json | 1 - rules/CVE-2022-0492/rule.metadata.json | 1 - rules/CVE-2022-23648/rule.metadata.json | 1 - rules/CVE-2022-24348/rule.metadata.json | 1 - rules/CVE-2022-3172/rule.metadata.json | 1 - rules/CVE-2022-39328/rule.metadata.json | 1 - rules/CVE-2022-47633/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/access-container-service-account-v1/rule.metadata.json | 1 - rules/access-container-service-account/rule.metadata.json | 1 - 
rules/alert-any-hostpath/rule.metadata.json | 3 +-- .../alert-container-optimized-os-not-in-use/rule.metadata.json | 1 - rules/alert-fargate-not-in-use/rule.metadata.json | 1 - .../alert-mount-potential-credentials-paths/rule.metadata.json | 1 - rules/alert-rw-hostpath/rule.metadata.json | 3 +-- rules/anonymous-access-enabled/rule.metadata.json | 1 - rules/anonymous-requests-to-kubelet-updated/rule.metadata.json | 1 - rules/audit-policy-content/rule.metadata.json | 1 - rules/automount-default-service-account/rule.metadata.json | 1 - rules/automount-service-account/rule.metadata.json | 1 - rules/cluster-admin-role/rule.metadata.json | 1 - rules/configmap-in-default-namespace/rule.metadata.json | 1 - rules/configured-liveness-probe/rule.metadata.json | 1 - rules/configured-readiness-probe/rule.metadata.json | 1 - rules/container-hostPort/rule.metadata.json | 1 - rules/container-image-repository-v1/rule.metadata.json | 1 - rules/container-image-repository/rule.metadata.json | 1 - rules/containers-mounting-docker-socket/rule.metadata.json | 1 - .../csistoragecapacity-in-default-namespace/rule.metadata.json | 1 - rules/drop-capability-netraw/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/endpoints-in-default-namespace/rule.metadata.json | 1 - rules/endpointslice-in-default-namespace/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/ensure-aws-policies-are-present/rule.metadata.json | 1 - rules/ensure-azure-rbac-is-set/rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../ensure-endpointprivateaccess-is-enabled/rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/ensure-image-scanning-enabled-cloud/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/ensure-network-policy-is-enabled-eks/rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - 
.../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/etcd-auto-tls-disabled/rule.metadata.json | 1 - rules/etcd-client-auth-cert/rule.metadata.json | 1 - rules/etcd-encryption-native/rule.metadata.json | 1 - rules/etcd-peer-auto-tls-disabled/rule.metadata.json | 1 - rules/etcd-peer-client-auth-cert/rule.metadata.json | 1 - rules/etcd-peer-tls-enabled/rule.metadata.json | 1 - rules/etcd-tls-enabled/rule.metadata.json | 1 - rules/etcd-unique-ca/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/exec-into-container-v1/rule.metadata.json | 1 - rules/exec-into-container/rule.metadata.json | 1 - rules/exposed-critical-pods/rule.metadata.json | 1 - rules/exposed-rce-pods/rule.metadata.json | 1 - rules/exposed-sensitive-interfaces-v1/rule.metadata.json | 1 - rules/exposed-sensitive-interfaces/rule.metadata.json | 1 - rules/exposure-to-internet/rule.metadata.json | 1 - rules/external-secret-storage/rule.metadata.json | 1 - rules/has-image-signature/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/host-network-access/rule.metadata.json | 1 - rules/host-pid-ipc-privileges/rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../image-pull-policy-is-not-set-to-always/rule.metadata.json | 1 - rules/immutable-container-filesystem/rule.metadata.json | 1 - rules/ingress-and-egress-blocked/rule.metadata.json | 1 - rules/ingress-in-default-namespace/rule.metadata.json | 1 - rules/insecure-capabilities/rule.metadata.json | 1 - rules/insecure-port-flag/rule.metadata.json | 1 - rules/instance-metadata-api-access/rule.metadata.json | 1 - rules/internal-networking/rule.metadata.json | 3 +-- rules/k8s-audit-logs-enabled-cloud/rule.metadata.json | 1 - rules/k8s-audit-logs-enabled-native-cis/rule.metadata.json | 1 - rules/k8s-audit-logs-enabled-native/rule.metadata.json | 1 - rules/k8s-common-labels-usage/rule.metadata.json | 1 - .../kubelet-authorization-mode-alwaysAllow/rule.metadata.json | 1 - rules/kubelet-event-qps/rule.metadata.json | 1 - rules/kubelet-hostname-override/rule.metadata.json | 1 - rules/kubelet-ip-tables/rule.metadata.json | 1 - rules/kubelet-protect-kernel-defaults/rule.metadata.json | 1 - rules/kubelet-rotate-certificates/rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/kubelet-strong-cryptography-ciphers/rule.metadata.json | 1 - rules/label-usage-for-resources/rule.metadata.json | 1 - rules/lease-in-default-namespace/rule.metadata.json | 1 - rules/linux-hardening/rule.metadata.json | 1 - rules/list-all-mutating-webhooks/rule.metadata.json | 3 +-- rules/list-all-namespaces/rule.metadata.json | 1 - rules/list-all-validating-webhooks/rule.metadata.json | 3 +-- rules/list-role-definitions-in-acr/rule.metadata.json | 1 - rules/naked-pods/rule.metadata.json | 1 - rules/namespace-without-service-account/rule.metadata.json | 1 - rules/non-root-containers/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/pod-security-admission-applied-1/rule.metadata.json | 1 - rules/pod-security-admission-applied-2/rule.metadata.json | 1 - 
.../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/pods-in-default-namespace/rule.metadata.json | 1 - rules/podtemplate-in-default-namespace/rule.metadata.json | 1 - rules/psp-deny-allowed-capabilities/rule.metadata.json | 1 - rules/psp-deny-allowprivilegeescalation/rule.metadata.json | 1 - rules/psp-deny-hostipc/rule.metadata.json | 1 - rules/psp-deny-hostnetwork/rule.metadata.json | 1 - rules/psp-deny-hostpid/rule.metadata.json | 1 - rules/psp-deny-privileged-container/rule.metadata.json | 1 - rules/psp-deny-root-container/rule.metadata.json | 1 - rules/psp-enabled-cloud/rule.metadata.json | 1 - rules/psp-enabled-native/rule.metadata.json | 1 - rules/psp-required-drop-capabilities/rule.metadata.json | 1 - rules/rbac-enabled-cloud/rule.metadata.json | 1 - rules/rbac-enabled-native/rule.metadata.json | 1 - rules/read-only-port-enabled-updated/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/resource-policies/rule.metadata.json | 1 - rules/resources-cpu-limit-and-request/rule.metadata.json | 1 - rules/resources-memory-limit-and-request/rule.metadata.json | 1 - rules/resources-secret-in-default-namespace/rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../review-roles-with-aws-iam-authenticator/rule.metadata.json | 1 - rules/role-in-default-namespace/rule.metadata.json | 1 - rules/rolebinding-in-default-namespace/rule.metadata.json | 1 - rules/rule-access-dashboard-subject-v1/rule.metadata.json | 1 - rules/rule-access-dashboard-wl-v1/rule.metadata.json | 1 - rules/rule-access-dashboard/rule.metadata.json | 1 - rules/rule-allow-privilege-escalation/rule.metadata.json | 1 - rules/rule-can-bind-escalate/rule.metadata.json | 1 - rules/rule-can-create-pod/rule.metadata.json | 1 - rules/rule-can-delete-k8s-events-v1/rule.metadata.json | 1 - rules/rule-can-delete-k8s-events/rule.metadata.json | 1 - rules/rule-can-impersonate-users-groups-v1/rule.metadata.json | 1 - rules/rule-can-impersonate-users-groups/rule.metadata.json | 1 - rules/rule-can-list-get-secrets-v1/rule.metadata.json | 1 - rules/rule-can-list-get-secrets/rule.metadata.json | 1 - rules/rule-can-portforward-v1/rule.metadata.json | 1 - rules/rule-can-portforward/rule.metadata.json | 1 - rules/rule-can-ssh-to-pod-v1/rule.metadata.json | 1 - rules/rule-can-ssh-to-pod/rule.metadata.json | 1 - rules/rule-can-update-configmap-v1/rule.metadata.json | 1 - rules/rule-can-update-configmap/rule.metadata.json | 1 - rules/rule-cni-enabled-aks/rule.metadata.json | 1 - rules/rule-credentials-configmap/rule.metadata.json | 3 +-- rules/rule-credentials-in-env-var/rule.metadata.json | 3 +-- rules/rule-deny-cronjobs/rule.metadata.json | 3 +-- rules/rule-excessive-delete-rights-v1/rule.metadata.json | 1 - rules/rule-excessive-delete-rights/rule.metadata.json | 1 - rules/rule-hostile-multitenant-workloads/rule.metadata.json | 1 - .../rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/rule-identify-old-k8s-registry/rule.metadata.json | 1 - rules/rule-list-all-cluster-admins-v1/rule.metadata.json | 1 - rules/rule-list-all-cluster-admins/rule.metadata.json | 1 - rules/rule-manual/rule.metadata.json | 1 - rules/rule-privileged-container/rule.metadata.json | 1 - rules/rule-secrets-in-env-var/rule.metadata.json | 1 - rules/secret-etcd-encryption-cloud/rule.metadata.json | 1 - rules/service-in-default-namespace/rule.metadata.json | 1 - rules/serviceaccount-in-default-namespace/rule.metadata.json | 1 - 
rules/serviceaccount-token-mount/rule.metadata.json | 1 - rules/set-fsgroup-value/rule.metadata.json | 1 - rules/set-fsgroupchangepolicy-value/rule.metadata.json | 1 - rules/set-procmount-default/rule.metadata.json | 1 - rules/set-seLinuxOptions/rule.metadata.json | 1 - rules/set-seccomp-profile-RuntimeDefault/rule.metadata.json | 1 - rules/set-seccomp-profile/rule.metadata.json | 1 - rules/set-supplementalgroups-values/rule.metadata.json | 1 - rules/set-sysctls-params/rule.metadata.json | 1 - rules/sudo-in-container-entrypoint/rule.metadata.json | 1 - .../rule.metadata.json | 1 - rules/verify-image-signature/rule.metadata.json | 1 - rules/workload-mounted-configmap/rule.metadata.json | 1 - rules/workload-mounted-pvc/rule.metadata.json | 1 - rules/workload-mounted-secrets/rule.metadata.json | 1 - scripts/init-rule.py | 1 - 511 files changed, 13 insertions(+), 523 deletions(-) diff --git a/controls/C-0001-forbiddencontainerregistries.json b/controls/C-0001-forbiddencontainerregistries.json index 4b55ecdd0..de918c769 100644 --- a/controls/C-0001-forbiddencontainerregistries.json +++ b/controls/C-0001-forbiddencontainerregistries.json @@ -1,7 +1,6 @@ { "name": "Forbidden Container Registries", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Initial Access" ], diff --git a/controls/C-0002-execintocontainer.json b/controls/C-0002-execintocontainer.json index 139380ac1..9c0902c31 100644 --- a/controls/C-0002-execintocontainer.json +++ b/controls/C-0002-execintocontainer.json @@ -1,7 +1,6 @@ { "name": "Exec into container", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Execution" ], diff --git a/controls/C-0004-resourcesmemorylimitandrequest.json b/controls/C-0004-resourcesmemorylimitandrequest.json index afa91763f..5d1df7987 100644 --- a/controls/C-0004-resourcesmemorylimitandrequest.json +++ b/controls/C-0004-resourcesmemorylimitandrequest.json @@ -1,7 +1,6 @@ { "name": "Resources memory limit and request", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "compliance", "devops" diff --git a/controls/C-0005-apiserverinsecureportisenabled.json b/controls/C-0005-apiserverinsecureportisenabled.json index 8d4882f74..5fb3aad30 100644 --- a/controls/C-0005-apiserverinsecureportisenabled.json +++ b/controls/C-0005-apiserverinsecureportisenabled.json @@ -1,7 +1,6 @@ { "name": "API server insecure port is enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0007-datadestruction.json b/controls/C-0007-datadestruction.json index 5cd33f53c..984200f71 100644 --- a/controls/C-0007-datadestruction.json +++ b/controls/C-0007-datadestruction.json @@ -1,7 +1,6 @@ { "name": "Data Destruction", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Impact" ], diff --git a/controls/C-0009-resourcelimits.json b/controls/C-0009-resourcelimits.json index cf38bf979..7ef821794 100644 --- a/controls/C-0009-resourcelimits.json +++ b/controls/C-0009-resourcelimits.json @@ -1,7 +1,6 @@ { "name": "Resource limits", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0012-applicationscredentialsinconfigurationfiles.json b/controls/C-0012-applicationscredentialsinconfigurationfiles.json index a093a3e06..8ca05f709 100644 --- a/controls/C-0012-applicationscredentialsinconfigurationfiles.json +++ b/controls/C-0012-applicationscredentialsinconfigurationfiles.json @@ -2,7 +2,6 @@ "name": "Applications credentials in configuration files", "attributes": { 
"actionRequired": "configuration", - "armoBuiltin": true, "microsoftMitreColumns": [ "Credential access", "Lateral Movement" diff --git a/controls/C-0013-nonrootcontainers.json b/controls/C-0013-nonrootcontainers.json index a60e15ea8..ac86fbc44 100644 --- a/controls/C-0013-nonrootcontainers.json +++ b/controls/C-0013-nonrootcontainers.json @@ -1,7 +1,6 @@ { "name": "Non-root containers", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0014-accesskubernetesdashboard.json b/controls/C-0014-accesskubernetesdashboard.json index a73388f62..05740d557 100644 --- a/controls/C-0014-accesskubernetesdashboard.json +++ b/controls/C-0014-accesskubernetesdashboard.json @@ -1,7 +1,6 @@ { "name": "Access Kubernetes dashboard", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Discovery", "Lateral Movement" diff --git a/controls/C-0015-listkubernetessecrets.json b/controls/C-0015-listkubernetessecrets.json index f990a4b71..1f0203802 100644 --- a/controls/C-0015-listkubernetessecrets.json +++ b/controls/C-0015-listkubernetessecrets.json @@ -1,7 +1,6 @@ { "name": "List Kubernetes secrets", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Credential access" ], diff --git a/controls/C-0016-allowprivilegeescalation.json b/controls/C-0016-allowprivilegeescalation.json index c6fcd7665..383059b1c 100644 --- a/controls/C-0016-allowprivilegeescalation.json +++ b/controls/C-0016-allowprivilegeescalation.json @@ -1,7 +1,6 @@ { "name": "Allow privilege escalation", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance", diff --git a/controls/C-0017-immutablecontainerfilesystem.json b/controls/C-0017-immutablecontainerfilesystem.json index 45b21bd7f..657d9dad7 100644 --- a/controls/C-0017-immutablecontainerfilesystem.json +++ b/controls/C-0017-immutablecontainerfilesystem.json @@ -1,7 +1,6 @@ { "name": "Immutable container filesystem", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance", diff --git a/controls/C-0018-configuredreadinessprobe.json b/controls/C-0018-configuredreadinessprobe.json index 160e7e2dd..1be332d3a 100644 --- a/controls/C-0018-configuredreadinessprobe.json +++ b/controls/C-0018-configuredreadinessprobe.json @@ -1,7 +1,6 @@ { "name": "Configured readiness probe", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "devops" ] diff --git a/controls/C-0020-mountserviceprincipal.json b/controls/C-0020-mountserviceprincipal.json index 4f0c8cbf1..91b58039f 100644 --- a/controls/C-0020-mountserviceprincipal.json +++ b/controls/C-0020-mountserviceprincipal.json @@ -1,7 +1,6 @@ { "name": "Mount service principal", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Credential Access" ], diff --git a/controls/C-0021-exposedsensitiveinterfaces.json b/controls/C-0021-exposedsensitiveinterfaces.json index 13b202a97..d606eee15 100644 --- a/controls/C-0021-exposedsensitiveinterfaces.json +++ b/controls/C-0021-exposedsensitiveinterfaces.json @@ -2,7 +2,6 @@ "name": "Exposed sensitive interfaces", "attributes": { "actionRequired": "configuration", - "armoBuiltin": true, "microsoftMitreColumns": [ "Initial access" ], diff --git a/controls/C-0026-kubernetescronjob.json b/controls/C-0026-kubernetescronjob.json index 3f8ebbc7a..fdff6848d 100644 --- a/controls/C-0026-kubernetescronjob.json +++ b/controls/C-0026-kubernetescronjob.json @@ -1,7 +1,6 @@ { "name": "Kubernetes CronJob", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": 
[ "Persistence" ], diff --git a/controls/C-0030-ingressandegressblocked.json b/controls/C-0030-ingressandegressblocked.json index 98afdec86..56196673d 100644 --- a/controls/C-0030-ingressandegressblocked.json +++ b/controls/C-0030-ingressandegressblocked.json @@ -1,7 +1,6 @@ { "name": "Ingress and Egress blocked", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "compliance" ] diff --git a/controls/C-0031-deletekubernetesevents.json b/controls/C-0031-deletekubernetesevents.json index 149ec2beb..f862b18fd 100644 --- a/controls/C-0031-deletekubernetesevents.json +++ b/controls/C-0031-deletekubernetesevents.json @@ -1,7 +1,6 @@ { "name": "Delete Kubernetes events", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Defense evasion" ], diff --git a/controls/C-0034-automaticmappingofserviceaccount.json b/controls/C-0034-automaticmappingofserviceaccount.json index bcea7277f..01a0b146f 100644 --- a/controls/C-0034-automaticmappingofserviceaccount.json +++ b/controls/C-0034-automaticmappingofserviceaccount.json @@ -1,7 +1,6 @@ { "name": "Automatic mapping of service account", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance", diff --git a/controls/C-0035-clusteradminbinding.json b/controls/C-0035-clusteradminbinding.json index 74eb05741..8e6cebce6 100644 --- a/controls/C-0035-clusteradminbinding.json +++ b/controls/C-0035-clusteradminbinding.json @@ -1,7 +1,6 @@ { "name": "Cluster-admin binding", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Privilege escalation" ], diff --git a/controls/C-0036-maliciousadmissioncontrollervalidating.json b/controls/C-0036-maliciousadmissioncontrollervalidating.json index 61a4660a8..2ed288707 100644 --- a/controls/C-0036-maliciousadmissioncontrollervalidating.json +++ b/controls/C-0036-maliciousadmissioncontrollervalidating.json @@ -1,7 +1,6 @@ { "name": "Validate admission controller (validating)", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Credential access" ], diff --git a/controls/C-0037-corednspoisoning.json b/controls/C-0037-corednspoisoning.json index bd0bbf9b5..3eb69d04b 100644 --- a/controls/C-0037-corednspoisoning.json +++ b/controls/C-0037-corednspoisoning.json @@ -1,7 +1,6 @@ { "name": "CoreDNS poisoning", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Lateral Movement" ], diff --git a/controls/C-0038-hostpidipcprivileges.json b/controls/C-0038-hostpidipcprivileges.json index 880f1b975..80a86f0b5 100644 --- a/controls/C-0038-hostpidipcprivileges.json +++ b/controls/C-0038-hostpidipcprivileges.json @@ -1,7 +1,6 @@ { "name": "Host PID/IPC privileges", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0039-maliciousadmissioncontrollermutating.json b/controls/C-0039-maliciousadmissioncontrollermutating.json index cc3cdfba2..3fb7e2290 100644 --- a/controls/C-0039-maliciousadmissioncontrollermutating.json +++ b/controls/C-0039-maliciousadmissioncontrollermutating.json @@ -1,7 +1,6 @@ { "name": "Validate admission controller (mutating)", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Persistence" ], diff --git a/controls/C-0041-hostnetworkaccess.json b/controls/C-0041-hostnetworkaccess.json index 739aaf032..580e32f69 100644 --- a/controls/C-0041-hostnetworkaccess.json +++ b/controls/C-0041-hostnetworkaccess.json @@ -1,7 +1,6 @@ { "name": "HostNetwork access", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git 
a/controls/C-0042-sshserverrunninginsidecontainer.json b/controls/C-0042-sshserverrunninginsidecontainer.json index 702157a64..2163c6961 100644 --- a/controls/C-0042-sshserverrunninginsidecontainer.json +++ b/controls/C-0042-sshserverrunninginsidecontainer.json @@ -1,7 +1,6 @@ { "name": "SSH server running inside container", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Execution" ], diff --git a/controls/C-0044-containerhostport.json b/controls/C-0044-containerhostport.json index 89b18b9c7..1ad44feec 100644 --- a/controls/C-0044-containerhostport.json +++ b/controls/C-0044-containerhostport.json @@ -1,7 +1,6 @@ { "name": "Container hostPort", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance", diff --git a/controls/C-0045-writablehostpathmount.json b/controls/C-0045-writablehostpathmount.json index 046a39d12..bed14f51a 100644 --- a/controls/C-0045-writablehostpathmount.json +++ b/controls/C-0045-writablehostpathmount.json @@ -1,7 +1,6 @@ { "name": "Writable hostPath mount", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Persistence", "Lateral Movement" diff --git a/controls/C-0046-insecurecapabilities.json b/controls/C-0046-insecurecapabilities.json index 6602ac291..a4cb3ab46 100644 --- a/controls/C-0046-insecurecapabilities.json +++ b/controls/C-0046-insecurecapabilities.json @@ -2,7 +2,6 @@ "name": "Insecure capabilities", "attributes": { "actionRequired": "configuration", - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance", diff --git a/controls/C-0048-hostpathmount.json b/controls/C-0048-hostpathmount.json index ca4927470..6e687b6d3 100644 --- a/controls/C-0048-hostpathmount.json +++ b/controls/C-0048-hostpathmount.json @@ -1,7 +1,6 @@ { "name": "HostPath mount", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Privilege escalation" ], diff --git a/controls/C-0049-networkmapping.json b/controls/C-0049-networkmapping.json index 4ed7e4c2a..8a0e662e7 100644 --- a/controls/C-0049-networkmapping.json +++ b/controls/C-0049-networkmapping.json @@ -1,7 +1,6 @@ { "name": "Network mapping", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Discovery" ], diff --git a/controls/C-0050-resourcescpulimitandrequest.json b/controls/C-0050-resourcescpulimitandrequest.json index 5084d6866..984d06d34 100644 --- a/controls/C-0050-resourcescpulimitandrequest.json +++ b/controls/C-0050-resourcescpulimitandrequest.json @@ -2,7 +2,6 @@ "name": "Resources CPU limit and request", "attributes": { "actionRequired": "configuration", - "armoBuiltin": true, "controlTypeTags": [ "compliance", "devops" diff --git a/controls/C-0052-instancemetadataapi.json b/controls/C-0052-instancemetadataapi.json index 0c8718562..9db8f8d5c 100644 --- a/controls/C-0052-instancemetadataapi.json +++ b/controls/C-0052-instancemetadataapi.json @@ -1,7 +1,6 @@ { "name": "Instance Metadata API", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Discovery" ], diff --git a/controls/C-0053-accesscontainerserviceaccount.json b/controls/C-0053-accesscontainerserviceaccount.json index a99e952ee..4586605fa 100644 --- a/controls/C-0053-accesscontainerserviceaccount.json +++ b/controls/C-0053-accesscontainerserviceaccount.json @@ -1,7 +1,6 @@ { "name": "Access container service account", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Credential access" ], diff --git a/controls/C-0054-clusterinternalnetworking.json b/controls/C-0054-clusterinternalnetworking.json index f2ac99d2e..d4ab27bf8 
100644 --- a/controls/C-0054-clusterinternalnetworking.json +++ b/controls/C-0054-clusterinternalnetworking.json @@ -1,7 +1,6 @@ { "name": "Cluster internal networking", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Lateral movement" ], diff --git a/controls/C-0055-linuxhardening.json b/controls/C-0055-linuxhardening.json index 996b5fe57..75e374a56 100644 --- a/controls/C-0055-linuxhardening.json +++ b/controls/C-0055-linuxhardening.json @@ -1,7 +1,6 @@ { "name": "Linux hardening", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0056-configuredlivenessprobe.json b/controls/C-0056-configuredlivenessprobe.json index 88fef2132..f2191926b 100644 --- a/controls/C-0056-configuredlivenessprobe.json +++ b/controls/C-0056-configuredlivenessprobe.json @@ -1,7 +1,6 @@ { "name": "Configured liveness probe", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "devops" ] diff --git a/controls/C-0057-privilegedcontainer.json b/controls/C-0057-privilegedcontainer.json index 6cb4a2074..c4565b51a 100644 --- a/controls/C-0057-privilegedcontainer.json +++ b/controls/C-0057-privilegedcontainer.json @@ -1,7 +1,6 @@ { "name": "Privileged container", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Privilege escalation" ], diff --git a/controls/C-0058-cve202125741usingsymlinkforarbitraryhostfilesystemaccess.json b/controls/C-0058-cve202125741usingsymlinkforarbitraryhostfilesystemaccess.json index e57b4fd03..1c2c27134 100644 --- a/controls/C-0058-cve202125741usingsymlinkforarbitraryhostfilesystemaccess.json +++ b/controls/C-0058-cve202125741usingsymlinkforarbitraryhostfilesystemaccess.json @@ -1,7 +1,6 @@ { "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0059-cve202125742nginxingresssnippetannotationvulnerability.json b/controls/C-0059-cve202125742nginxingresssnippetannotationvulnerability.json index 5cc664567..95ec4dbee 100644 --- a/controls/C-0059-cve202125742nginxingresssnippetannotationvulnerability.json +++ b/controls/C-0059-cve202125742nginxingresssnippetannotationvulnerability.json @@ -1,7 +1,6 @@ { "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0061-podsindefaultnamespace.json b/controls/C-0061-podsindefaultnamespace.json index e2301b080..e2abdff72 100644 --- a/controls/C-0061-podsindefaultnamespace.json +++ b/controls/C-0061-podsindefaultnamespace.json @@ -1,7 +1,6 @@ { "name": "Pods in default namespace", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "compliance", "devops" diff --git a/controls/C-0062-sudoincontainerentrypoint.json b/controls/C-0062-sudoincontainerentrypoint.json index 2a8b27c5e..43c2fe44f 100644 --- a/controls/C-0062-sudoincontainerentrypoint.json +++ b/controls/C-0062-sudoincontainerentrypoint.json @@ -1,7 +1,6 @@ { "name": "Sudo in container entrypoint", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ] diff --git a/controls/C-0063-portforwardingprivileges.json b/controls/C-0063-portforwardingprivileges.json index 5d24a32e1..905759773 100644 --- a/controls/C-0063-portforwardingprivileges.json +++ b/controls/C-0063-portforwardingprivileges.json @@ -1,7 +1,6 @@ { "name": "Portforwarding privileges", "attributes": { - "armoBuiltin": true, "rbacQuery": "Port Forwarding", 
"controlTypeTags": [ "security-impact", diff --git a/controls/C-0065-noimpersonation.json b/controls/C-0065-noimpersonation.json index 4d677e49f..e1e872cf8 100644 --- a/controls/C-0065-noimpersonation.json +++ b/controls/C-0065-noimpersonation.json @@ -1,7 +1,6 @@ { "name": "No impersonation", "attributes": { - "armoBuiltin": true, "rbacQuery": "Impersonation", "controlTypeTags": [ "security", diff --git a/controls/C-0066-secretetcdencryptionenabled.json b/controls/C-0066-secretetcdencryptionenabled.json index 24880d5ec..cf6a902cf 100644 --- a/controls/C-0066-secretetcdencryptionenabled.json +++ b/controls/C-0066-secretetcdencryptionenabled.json @@ -1,7 +1,6 @@ { "name": "Secret/etcd encryption enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0067-auditlogsenabled.json b/controls/C-0067-auditlogsenabled.json index e90541c9d..8632d19b4 100644 --- a/controls/C-0067-auditlogsenabled.json +++ b/controls/C-0067-auditlogsenabled.json @@ -1,7 +1,6 @@ { "name": "Audit logs enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0068-pspenabled.json b/controls/C-0068-pspenabled.json index b18ede310..4ac34744c 100644 --- a/controls/C-0068-pspenabled.json +++ b/controls/C-0068-pspenabled.json @@ -1,7 +1,6 @@ { "name": "PSP enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0069-disableanonymousaccesstokubeletservice.json b/controls/C-0069-disableanonymousaccesstokubeletservice.json index 19759145b..02ade8374 100644 --- a/controls/C-0069-disableanonymousaccesstokubeletservice.json +++ b/controls/C-0069-disableanonymousaccesstokubeletservice.json @@ -1,7 +1,6 @@ { "name": "Disable anonymous access to Kubelet service", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0070-enforcekubeletclienttlsauthentication.json b/controls/C-0070-enforcekubeletclienttlsauthentication.json index fc57cbc58..a9f272927 100644 --- a/controls/C-0070-enforcekubeletclienttlsauthentication.json +++ b/controls/C-0070-enforcekubeletclienttlsauthentication.json @@ -1,7 +1,6 @@ { "name": "Enforce Kubelet client TLS authentication", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0073-nakedpods.json b/controls/C-0073-nakedpods.json index 6782d3900..fae18d3b0 100644 --- a/controls/C-0073-nakedpods.json +++ b/controls/C-0073-nakedpods.json @@ -1,7 +1,6 @@ { "name": "Naked pods", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "devops" ] diff --git a/controls/C-0074-containersmountingdockersocket.json b/controls/C-0074-containersmountingdockersocket.json index 4551f3c32..7126ff12c 100644 --- a/controls/C-0074-containersmountingdockersocket.json +++ b/controls/C-0074-containersmountingdockersocket.json @@ -1,7 +1,6 @@ { "name": "Container runtime socket mounted", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "devops", "smartRemediation" diff --git a/controls/C-0075-imagepullpolicyonlatesttag.json b/controls/C-0075-imagepullpolicyonlatesttag.json index 0ecca02c2..b02d5df6f 100644 --- a/controls/C-0075-imagepullpolicyonlatesttag.json +++ b/controls/C-0075-imagepullpolicyonlatesttag.json @@ -1,7 +1,6 @@ { "name": "Image pull policy on latest tag", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "devops" ] diff --git a/controls/C-0076-labelusageforresources.json 
b/controls/C-0076-labelusageforresources.json index 105baa26a..e885a9080 100644 --- a/controls/C-0076-labelusageforresources.json +++ b/controls/C-0076-labelusageforresources.json @@ -2,7 +2,6 @@ "name": "Label usage for resources", "attributes": { "actionRequired": "configuration", - "armoBuiltin": true, "controlTypeTags": [ "devops" ] diff --git a/controls/C-0077-k8scommonlabelsusage.json b/controls/C-0077-k8scommonlabelsusage.json index d3645ac56..3e2a61d3e 100644 --- a/controls/C-0077-k8scommonlabelsusage.json +++ b/controls/C-0077-k8scommonlabelsusage.json @@ -2,7 +2,6 @@ "name": "K8s common labels usage", "attributes": { "actionRequired": "configuration", - "armoBuiltin": true, "controlTypeTags": [ "devops" ] diff --git a/controls/C-0078-imagesfromallowedregistry.json b/controls/C-0078-imagesfromallowedregistry.json index dce18ebe3..4011590c7 100644 --- a/controls/C-0078-imagesfromallowedregistry.json +++ b/controls/C-0078-imagesfromallowedregistry.json @@ -2,7 +2,6 @@ "name": "Images from allowed registry", "attributes": { "actionRequired": "configuration", - "armoBuiltin": true, "microsoftMitreColumns": [ "Collection" ], diff --git a/controls/C-0079-cve20220185linuxkernelcontainerescape.json b/controls/C-0079-cve20220185linuxkernelcontainerescape.json index b135f9d07..cd0d53126 100644 --- a/controls/C-0079-cve20220185linuxkernelcontainerescape.json +++ b/controls/C-0079-cve20220185linuxkernelcontainerescape.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-0185-linux-kernel-container-escape", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0081-cve202224348argocddirtraversal.json b/controls/C-0081-cve202224348argocddirtraversal.json index 9a3fe859d..8dd9fa520 100644 --- a/controls/C-0081-cve202224348argocddirtraversal.json +++ b/controls/C-0081-cve202224348argocddirtraversal.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-24348-argocddirtraversal", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ] diff --git a/controls/C-0083-workloadswithcriticalvulnerabilitiesexposedtoexternaltraffic.json b/controls/C-0083-workloadswithcriticalvulnerabilitiesexposedtoexternaltraffic.json index 72199d3d7..2479eff06 100644 --- a/controls/C-0083-workloadswithcriticalvulnerabilitiesexposedtoexternaltraffic.json +++ b/controls/C-0083-workloadswithcriticalvulnerabilitiesexposedtoexternaltraffic.json @@ -1,7 +1,6 @@ { "name": "Workloads with Critical vulnerabilities exposed to external traffic", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ] diff --git a/controls/C-0084-workloadswithrcevulnerabilitiesexposedtoexternaltraffic.json b/controls/C-0084-workloadswithrcevulnerabilitiesexposedtoexternaltraffic.json index f67d95544..7dbc746e9 100644 --- a/controls/C-0084-workloadswithrcevulnerabilitiesexposedtoexternaltraffic.json +++ b/controls/C-0084-workloadswithrcevulnerabilitiesexposedtoexternaltraffic.json @@ -1,7 +1,6 @@ { "name": "Workloads with RCE vulnerabilities exposed to external traffic", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0085-workloadswithexcessiveamountofvulnerabilities.json b/controls/C-0085-workloadswithexcessiveamountofvulnerabilities.json index cea0bfb7e..e0a3c3f55 100644 --- a/controls/C-0085-workloadswithexcessiveamountofvulnerabilities.json +++ b/controls/C-0085-workloadswithexcessiveamountofvulnerabilities.json @@ -2,7 +2,6 @@ "name": "Workloads with excessive amount of vulnerabilities", "attributes": { "actionRequired": 
"configuration", - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0086-cve20220492cgroupscontainerescape.json b/controls/C-0086-cve20220492cgroupscontainerescape.json index 65b5688f1..51772380a 100644 --- a/controls/C-0086-cve20220492cgroupscontainerescape.json +++ b/controls/C-0086-cve20220492cgroupscontainerescape.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-0492-cgroups-container-escape", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0087-cve202223648containerdfsescape.json b/controls/C-0087-cve202223648containerdfsescape.json index d850d7f1a..30a5cf28a 100644 --- a/controls/C-0087-cve202223648containerdfsescape.json +++ b/controls/C-0087-cve202223648containerdfsescape.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-23648-containerd-fs-escape", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ] diff --git a/controls/C-0088-rbacenabled.json b/controls/C-0088-rbacenabled.json index 5a85b4db0..44c895b1b 100644 --- a/controls/C-0088-rbacenabled.json +++ b/controls/C-0088-rbacenabled.json @@ -1,7 +1,6 @@ { "name": "RBAC enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" diff --git a/controls/C-0089-cve20223172aggregatedapiserverredirect.json b/controls/C-0089-cve20223172aggregatedapiserverredirect.json index 5509df70c..a65e59bea 100644 --- a/controls/C-0089-cve20223172aggregatedapiserverredirect.json +++ b/controls/C-0089-cve20223172aggregatedapiserverredirect.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-3172-aggregated-API-server-redirect", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0090-cve202239328grafanaauthbypass.json b/controls/C-0090-cve202239328grafanaauthbypass.json index 1191ce466..fcbde74a6 100644 --- a/controls/C-0090-cve202239328grafanaauthbypass.json +++ b/controls/C-0090-cve202239328grafanaauthbypass.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-39328-grafana-auth-bypass", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ] diff --git a/controls/C-0091-cve202247633kyvernosignaturebypass.json b/controls/C-0091-cve202247633kyvernosignaturebypass.json index ac49c011c..26d40f3be 100644 --- a/controls/C-0091-cve202247633kyvernosignaturebypass.json +++ b/controls/C-0091-cve202247633kyvernosignaturebypass.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-47633-kyverno-signature-bypass", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ] diff --git a/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json index 83585738c..70561ce3f 100644 --- a/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json b/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json index 2dbe3d2b4..3385a032c 100644 --- a/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json +++ 
b/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json index eb1d15e95..12fe8ca25 100644 --- a/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json b/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json index e02de4b08..7b3bc3aa9 100644 --- a/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json index b5581de00..e4d57ae1b 100644 --- a/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json b/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json index 0b2c5aa11..a7a99ff82 100644 --- a/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json index 183e05409..878fa443f 100644 --- a/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, 
"impact_statement": "None", diff --git a/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json b/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json index b9bda38cc..65c070d50 100644 --- a/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json index d7ad41f35..a667f6ec1 100644 --- a/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive" diff --git a/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json b/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json index 6af932e5f..30161b04e 100644 --- a/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json +++ b/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root" diff --git a/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json b/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json index 3c960b0c4..464089744 100644 --- a/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json +++ b/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 7, "impact_statement": "None", diff --git a/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json b/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json index a314344f8..c31dcaa0a 100644 --- a/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json +++ b/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd" diff --git a/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json b/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json index 9a02cc1ab..b934be74e 100644 --- a/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json +++ b/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json @@ -12,7 +12,6 @@ 
"ensure-that-the-admin.conf-file-permissions-are-set-to-600" ], "attributes": { - "armoBuiltin": true }, "baseScore": 7, "impact_statement": "None.", diff --git a/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json b/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json index b4a0c6800..f445a1432 100644 --- a/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json +++ b/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 7, "impact_statement": "None.", diff --git a/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json index ee0881b49..2655c7c12 100644 --- a/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json b/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json index ec2f43577..9552e189e 100644 --- a/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json +++ b/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json index 0908a7c2d..700526c95 100644 --- a/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json b/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json index cd6dbd8f9..44c4233e7 100644 --- a/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json +++ b/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json b/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json index 0848e6b5f..7199fa0f9 100644 --- a/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json +++ b/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json @@ -12,7 +12,6 @@ 
"ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 8, "impact_statement": "None", diff --git a/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json index 35d726979..39ed1b914 100644 --- a/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 8, "impact_statement": "None", diff --git a/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json b/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json index e55dd8fd4..c70fa9379 100644 --- a/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json +++ b/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json @@ -12,7 +12,6 @@ "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600" ], "attributes": { - "armoBuiltin": true }, "baseScore": 8, "impact_statement": "None", diff --git a/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json b/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json index 2c55415c8..c021bfbac 100644 --- a/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json +++ b/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false" diff --git a/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json b/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json index dde0314d9..7dc3e89da 100644 --- a/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json +++ b/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-token-auth-file-parameter-is-not-set" diff --git a/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json b/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json index fca74ebde..9ed3be63c 100644 --- a/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json +++ b/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set" diff --git a/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json b/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json index 908414850..d9d3a8312 100644 --- a/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json +++ 
b/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate" diff --git a/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json b/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json index 350c03104..1e5007181 100644 --- a/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json +++ b/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate" diff --git a/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json b/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json index e950ca3a5..3fe1bd12c 100644 --- a/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json +++ b/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow" diff --git a/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json b/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json index 5cb48553d..403c86f61 100644 --- a/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json +++ b/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-authorization-mode-argument-includes-Node" diff --git a/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json b/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json index 200341870..3a9309c13 100644 --- a/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json +++ b/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC" diff --git a/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json b/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json index 9e50895bf..c2016a57b 100644 --- a/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json +++ b/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-admission-control-plugin-EventRateLimit-is-set" diff --git 
a/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json b/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json index 5a4e284ee..94d13db31 100644 --- a/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json +++ b/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set" diff --git a/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json b/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json index ff57e1385..a1b28828a 100644 --- a/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json +++ b/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set" diff --git a/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json b/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json index 71801308b..0f8ad054e 100644 --- a/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json +++ b/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used" diff --git a/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json b/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json index 4887e8abd..e6ccd01b2 100644 --- a/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json +++ b/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-admission-control-plugin-ServiceAccount-is-set" diff --git a/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json b/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json index 4ed20d813..89a6f89cf 100644 --- a/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json +++ b/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set" diff --git a/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json b/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json index 08815eafa..732fc7934 100644 --- a/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json +++ b/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json @@ -9,7 +9,6 @@ 
"https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-admission-control-plugin-NodeRestriction-is-set" diff --git a/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json b/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json index 9f9712d53..084e651c8 100644 --- a/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json +++ b/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0" diff --git a/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json b/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json index 39d1ac300..4717aab65 100644 --- a/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json +++ b/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-profiling-argument-is-set-to-false" diff --git a/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json b/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json index d46ce48d8..3bce2ca73 100644 --- a/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json +++ b/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-audit-log-path-argument-is-set" diff --git a/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json b/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json index aaad447eb..5d754c477 100644 --- a/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json +++ b/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate" diff --git a/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json b/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json index dcd60ae44..9da6c5b5d 100644 --- a/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json +++ b/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate" diff --git a/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json b/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json index 5dcb8fc50..3b8c20b7d 100644 --- a/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json 
+++ b/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate" diff --git a/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json b/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json index ace65d7c3..47a2fd30b 100644 --- a/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json +++ b/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate" diff --git a/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json b/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json index 7a62a5a5c..7e1ef2c91 100644 --- a/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json +++ b/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true" diff --git a/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json b/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json index a45bd2882..3db936f6b 100644 --- a/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json +++ b/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate" diff --git a/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json b/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json index f9bf27741..b036696cf 100644 --- a/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate" diff --git a/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json b/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json index 512dcd8cf..5109ef0a6 100644 --- a/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json +++ b/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" ], "attributes": { - "armoBuiltin": true }, 
"rulesNames": [ "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate" diff --git a/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json b/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json index f76647d74..16d54997d 100644 --- a/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json +++ b/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate" diff --git a/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json b/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json index 90f6ce961..4a598bb5f 100644 --- a/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json +++ b/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate" diff --git a/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json b/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json index ecd759843..75260ca96 100644 --- a/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json +++ b/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate" diff --git a/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json b/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json index a3ecf1a73..88ecde7da 100644 --- a/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json +++ b/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-api-server-encryption-providers-are-appropriately-configured" diff --git a/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json b/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json index f7133e991..6fd581ab3 100644 --- a/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json +++ b/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers" diff --git a/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json b/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json index 79a973396..e43c72438 100644 --- 
a/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json +++ b/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate" diff --git a/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json b/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json index a7beb6144..4b045fe06 100644 --- a/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json +++ b/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-controller-manager-profiling-argument-is-set-to-false" diff --git a/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json b/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json index 83edbab75..722356eb6 100644 --- a/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json +++ b/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true" diff --git a/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json b/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json index d663d0013..3d22abaaf 100644 --- a/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json +++ b/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate" diff --git a/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json b/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json index d2a44513e..71adea6b4 100644 --- a/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json +++ b/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate" diff --git a/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json b/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json index 3f4a4c173..beb379523 100644 --- a/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json +++ 
b/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true" diff --git a/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json b/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json index 666237258..10b242b04 100644 --- a/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json +++ b/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1" diff --git a/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json b/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json index ad3b207f1..466486b94 100644 --- a/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json +++ b/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-scheduler-profiling-argument-is-set-to-false" diff --git a/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json b/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json index f4498708c..8740f53ab 100644 --- a/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json +++ b/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1" diff --git a/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json b/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json index 59da2a849..6d1a62835 100644 --- a/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "etcd-tls-enabled" diff --git a/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json b/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json index ae94607fd..68da5f5e2 100644 --- a/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json +++ b/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "etcd-client-auth-cert" diff --git a/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json b/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json index 14cbae1c1..e06cade47 100644 --- a/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json +++ b/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json @@ -9,7 +9,6 @@ 
"https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "etcd-auto-tls-disabled" diff --git a/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json b/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json index 82c54ff33..740d3d44e 100644 --- a/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "etcd-peer-tls-enabled" diff --git a/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json b/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json index 72d0072c3..582114816 100644 --- a/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json +++ b/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "etcd-peer-client-auth-cert" diff --git a/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json b/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json index 6c5711651..f74eb0fef 100644 --- a/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json +++ b/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "etcd-peer-auto-tls-disabled" diff --git a/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json b/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json index cc1a39a13..47588b6af 100644 --- a/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json +++ b/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "etcd-unique-ca" diff --git a/controls/C-0160-ensurethataminimalauditpolicyiscreated.json b/controls/C-0160-ensurethataminimalauditpolicyiscreated.json index 9aaf518fa..50afb0b3d 100644 --- a/controls/C-0160-ensurethataminimalauditpolicyiscreated.json +++ b/controls/C-0160-ensurethataminimalauditpolicyiscreated.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "k8s-audit-logs-enabled-native-cis" diff --git a/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json b/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json index 116fd1974..9958dd636 100644 --- a/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json +++ b/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "audit-policy-content" diff --git a/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json index 5ae6b9c7b..3a2c382d2 
100644 --- a/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive" diff --git a/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json b/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json index 4c9b2dfd1..de874ec6e 100644 --- a/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json +++ b/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root" diff --git a/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json b/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json index c5ba8caa6..77ebe3f66 100644 --- a/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json b/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json index 440285c4a..e8982e7cc 100644 --- a/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json +++ b/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json @@ -12,7 +12,6 @@ "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json index 5f1a11289..7660486c5 100644 --- a/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git a/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json b/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json index e3f6b972e..ed13229bf 100644 --- a/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json +++ b/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 6, "impact_statement": "None", diff --git 
a/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json index e7fd6c874..242eca8d6 100644 --- a/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 7, "impact_statement": "None", diff --git a/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json b/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json index f02c6fee6..7b9b3a42a 100644 --- a/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json +++ b/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 7, "impact_statement": "None", diff --git a/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json b/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json index 1935aa234..635d11361 100644 --- a/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json +++ b/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json @@ -12,7 +12,6 @@ "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive" ], "attributes": { - "armoBuiltin": true }, "baseScore": 7, "impact_statement": "None", diff --git a/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json b/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json index f5747f9dd..f25327e54 100644 --- a/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json +++ b/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json @@ -12,7 +12,6 @@ "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root" ], "attributes": { - "armoBuiltin": true }, "baseScore": 7, "impact_statement": "None", diff --git a/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json b/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json index 418ef34f6..90b330298 100644 --- a/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json +++ b/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "anonymous-requests-to-kubelet-service-updated" diff --git a/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json b/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json index 5315e42a0..9726af5d9 100644 --- a/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json +++ 
b/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-authorization-mode-alwaysAllow" diff --git a/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json b/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json index e629a726f..008e5ae76 100644 --- a/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json +++ b/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "enforce-kubelet-client-tls-authentication-updated" diff --git a/controls/C-0175-verifythatthereadonlyportargumentissetto0.json b/controls/C-0175-verifythatthereadonlyportargumentissetto0.json index 47d7617a2..6821ef5ae 100644 --- a/controls/C-0175-verifythatthereadonlyportargumentissetto0.json +++ b/controls/C-0175-verifythatthereadonlyportargumentissetto0.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "read-only-port-enabled-updated" diff --git a/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json b/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json index d5f6d5fd5..cac14e689 100644 --- a/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json +++ b/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-streaming-connection-idle-timeout" diff --git a/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json b/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json index fc5aad85c..58c18fbd5 100644 --- a/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json +++ b/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-protect-kernel-defaults" diff --git a/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json b/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json index 5872494fa..625358396 100644 --- a/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json +++ b/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-ip-tables" diff --git a/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json b/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json index 68573eb20..d08adebc6 100644 --- a/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json +++ b/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-hostname-override" diff --git 
a/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json b/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json index edb7998f3..34020fe44 100644 --- a/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json +++ b/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-event-qps" diff --git a/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json b/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json index 41004e0c8..330f8632d 100644 --- a/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json +++ b/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "validate-kubelet-tls-configuration-updated" diff --git a/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json b/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json index ac1e8f2df..1836bf526 100644 --- a/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json +++ b/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-rotate-certificates" diff --git a/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json b/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json index a2e3eeb54..3de6f9ad2 100644 --- a/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json +++ b/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-rotate-kubelet-server-certificate" diff --git a/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json b/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json index 1a858e92f..5df7efaee 100644 --- a/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json +++ b/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "kubelet-strong-cryptographics-ciphers" diff --git a/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json b/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json index e591e9553..5a0ab6578 100644 --- a/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json +++ b/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "cluster-admin-role" diff --git 
a/controls/C-0186-minimizeaccesstosecrets.json b/controls/C-0186-minimizeaccesstosecrets.json index d8f0fc3bb..6848b5268 100644 --- a/controls/C-0186-minimizeaccesstosecrets.json +++ b/controls/C-0186-minimizeaccesstosecrets.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-can-list-get-secrets-v1" diff --git a/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json b/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json index 1a09b3e9c..19e97ef8f 100644 --- a/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json +++ b/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-list-all-cluster-admins-v1" diff --git a/controls/C-0188-minimizeaccesstocreatepods.json b/controls/C-0188-minimizeaccesstocreatepods.json index 78da8ae95..c1cd2293a 100644 --- a/controls/C-0188-minimizeaccesstocreatepods.json +++ b/controls/C-0188-minimizeaccesstocreatepods.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-can-create-pod" diff --git a/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json b/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json index 1e1a560b3..6aba37cd6 100644 --- a/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json +++ b/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "automount-default-service-account", diff --git a/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json b/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json index 8f69e21cb..3d03705f1 100644 --- a/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json +++ b/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "automount-service-account" diff --git a/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json b/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json index 799121bf5..80af64a7e 100644 --- a/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json +++ b/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-can-bind-escalate", diff --git a/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json b/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json index fedc5df46..8fd428237 100644 --- a/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json +++ b/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json @@ -10,7 +10,6 @@ 
"https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-applied-1", diff --git a/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json b/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json index 5ddbdc34b..66bbe4015 100644 --- a/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json +++ b/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-baseline-applied-1", diff --git a/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json b/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json index a3cba616a..a8215e14e 100644 --- a/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json +++ b/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-baseline-applied-1", diff --git a/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json b/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json index 26f63ca44..ce44b97c2 100644 --- a/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json +++ b/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-baseline-applied-1", diff --git a/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json b/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json index 2e4d69dd0..1ba67e9ac 100644 --- a/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json +++ b/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-baseline-applied-1", diff --git a/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json b/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json index 811f1726d..c409c8c9b 100644 --- a/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json +++ b/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-restricted-applied-1", diff --git a/controls/C-0198-minimizetheadmissionofrootcontainers.json b/controls/C-0198-minimizetheadmissionofrootcontainers.json index c837df9cf..c7c4831bb 100644 --- a/controls/C-0198-minimizetheadmissionofrootcontainers.json +++ b/controls/C-0198-minimizetheadmissionofrootcontainers.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" ], "attributes": { - "armoBuiltin": 
true }, "rulesNames": [ "pod-security-admission-restricted-applied-1", diff --git a/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json b/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json index 76f59df0d..fe20c7827 100644 --- a/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json +++ b/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-baseline-applied-1", diff --git a/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json b/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json index c106ea32c..4baddf253 100644 --- a/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json +++ b/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-restricted-applied-1", diff --git a/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json b/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json index 18b9a72b3..9562269c8 100644 --- a/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json +++ b/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-restricted-applied-1", diff --git a/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json b/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json index eae222b48..e5485e4da 100644 --- a/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json +++ b/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-baseline-applied-1", diff --git a/controls/C-0203-minimizetheadmissionofhostpathvolumes.json b/controls/C-0203-minimizetheadmissionofhostpathvolumes.json index 6cd23d1f5..ea7586c8c 100644 --- a/controls/C-0203-minimizetheadmissionofhostpathvolumes.json +++ b/controls/C-0203-minimizetheadmissionofhostpathvolumes.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-baseline-applied-1", diff --git a/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json b/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json index 9f3c5c839..aa9879848 100644 --- a/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json +++ b/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pod-security-admission-baseline-applied-1", diff --git a/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json b/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json index e63a4cc81..f3d3dbee9 100644 --- 
a/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json +++ b/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json @@ -9,7 +9,6 @@ "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-cni-in-use-supports-network-policies" diff --git a/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json b/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json index fb452754e..41b11369d 100644 --- a/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json +++ b/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "internal-networking" diff --git a/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json b/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json index bf3a7c610..80efc67d4 100644 --- a/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json +++ b/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-secrets-in-env-var" diff --git a/controls/C-0208-considerexternalsecretstorage.json b/controls/C-0208-considerexternalsecretstorage.json index 531a1fcf5..aa09a2ba1 100644 --- a/controls/C-0208-considerexternalsecretstorage.json +++ b/controls/C-0208-considerexternalsecretstorage.json @@ -12,7 +12,6 @@ "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "external-secret-storage" diff --git a/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json b/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json index 03de1615e..4a43be133 100644 --- a/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json +++ b/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "list-all-namespaces" diff --git a/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json b/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json index eb94bb243..017e8a472 100644 --- a/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json +++ b/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "set-seccomp-profile-RuntimeDefault" diff --git a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json index d51668858..d6e3988f3 100644 --- a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json +++ b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" ], "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" 
diff --git a/controls/C-0212-thedefaultnamespaceshouldnotbeused.json b/controls/C-0212-thedefaultnamespaceshouldnotbeused.json index 92d0cb6b2..2840520ed 100644 --- a/controls/C-0212-thedefaultnamespaceshouldnotbeused.json +++ b/controls/C-0212-thedefaultnamespaceshouldnotbeused.json @@ -10,7 +10,6 @@ "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "pods-in-default-namespace", diff --git a/controls/C-0213-minimizetheadmissionofprivilegedcontainers.json b/controls/C-0213-minimizetheadmissionofprivilegedcontainers.json index 73ecf84f5..8644eb71c 100644 --- a/controls/C-0213-minimizetheadmissionofprivilegedcontainers.json +++ b/controls/C-0213-minimizetheadmissionofprivilegedcontainers.json @@ -10,7 +10,6 @@ "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "psp-deny-privileged-container" diff --git a/controls/C-0214-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json b/controls/C-0214-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json index ce11828ec..c6afbec98 100644 --- a/controls/C-0214-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json +++ b/controls/C-0214-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json @@ -9,7 +9,6 @@ "https://kubernetes.io/docs/concepts/policy/pod-security-policy" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "psp-deny-hostpid" diff --git a/controls/C-0215-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json b/controls/C-0215-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json index d4e00efb6..1de0d7356 100644 --- a/controls/C-0215-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json +++ b/controls/C-0215-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json @@ -9,7 +9,6 @@ "https://kubernetes.io/docs/concepts/policy/pod-security-policy" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "psp-deny-hostipc" diff --git a/controls/C-0216-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json b/controls/C-0216-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json index 05a610d69..c2e922b2b 100644 --- a/controls/C-0216-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json +++ b/controls/C-0216-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json @@ -9,7 +9,6 @@ "https://kubernetes.io/docs/concepts/policy/pod-security-policy" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "psp-deny-hostnetwork" diff --git a/controls/C-0217-minimizetheadmissionofcontainerswithallowprivilegeescalation.json b/controls/C-0217-minimizetheadmissionofcontainerswithallowprivilegeescalation.json index ebb9620ad..c534a5425 100644 --- a/controls/C-0217-minimizetheadmissionofcontainerswithallowprivilegeescalation.json +++ b/controls/C-0217-minimizetheadmissionofcontainerswithallowprivilegeescalation.json @@ -9,7 +9,6 @@ "https://kubernetes.io/docs/concepts/policy/pod-security-policy" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "psp-deny-allowprivilegeescalation" diff --git a/controls/C-0218-minimizetheadmissionofrootcontainers.json b/controls/C-0218-minimizetheadmissionofrootcontainers.json index 09bdb7782..0b63280a9 100644 --- a/controls/C-0218-minimizetheadmissionofrootcontainers.json +++ 
b/controls/C-0218-minimizetheadmissionofrootcontainers.json @@ -9,7 +9,6 @@ "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "psp-deny-root-container" diff --git a/controls/C-0219-minimizetheadmissionofcontainerswithaddedcapabilities.json b/controls/C-0219-minimizetheadmissionofcontainerswithaddedcapabilities.json index dd3beb276..71537bf19 100644 --- a/controls/C-0219-minimizetheadmissionofcontainerswithaddedcapabilities.json +++ b/controls/C-0219-minimizetheadmissionofcontainerswithaddedcapabilities.json @@ -10,7 +10,6 @@ "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "psp-deny-allowed-capabilities" diff --git a/controls/C-0220-minimizetheadmissionofcontainerswithcapabilitiesassigned.json b/controls/C-0220-minimizetheadmissionofcontainerswithcapabilitiesassigned.json index adbedb4fa..027031de9 100644 --- a/controls/C-0220-minimizetheadmissionofcontainerswithcapabilitiesassigned.json +++ b/controls/C-0220-minimizetheadmissionofcontainerswithcapabilitiesassigned.json @@ -10,7 +10,6 @@ "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "psp-required-drop-capabilities" diff --git a/controls/C-0221-ensureimagevulnerabilityscanningusingamazonecrimagescanningorathirdpartyprovider.json b/controls/C-0221-ensureimagevulnerabilityscanningusingamazonecrimagescanningorathirdpartyprovider.json index 965ca6348..c0ac2db13 100644 --- a/controls/C-0221-ensureimagevulnerabilityscanningusingamazonecrimagescanningorathirdpartyprovider.json +++ b/controls/C-0221-ensureimagevulnerabilityscanningusingamazonecrimagescanningorathirdpartyprovider.json @@ -9,7 +9,6 @@ "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-image-scanning-enabled-cloud" diff --git a/controls/C-0222-minimizeuseraccesstoamazonecr.json b/controls/C-0222-minimizeuseraccesstoamazonecr.json index 9b07514fc..062f8c322 100644 --- a/controls/C-0222-minimizeuseraccesstoamazonecr.json +++ b/controls/C-0222-minimizeuseraccesstoamazonecr.json @@ -9,7 +9,6 @@ "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-aws-policies-are-present" diff --git a/controls/C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json b/controls/C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json index 48038f40d..d77cc23b7 100644 --- a/controls/C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json +++ b/controls/C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json @@ -9,7 +9,6 @@ "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure_nodeinstancerole_has_right_permissions_for_ecr" diff --git a/controls/C-0225-preferusingdedicatedeksserviceaccounts.json b/controls/C-0225-preferusingdedicatedeksserviceaccounts.json index 937fe41f0..a45cb1332 100644 --- a/controls/C-0225-preferusingdedicatedeksserviceaccounts.json +++ b/controls/C-0225-preferusingdedicatedeksserviceaccounts.json @@ -11,7 +11,6 @@ "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" ], "attributes": { - 
"armoBuiltin": true }, "rulesNames": [ "ensure-default-service-accounts-has-only-default-roles", diff --git a/controls/C-0226-preferusingacontaineroptimizedoswhenpossible.json b/controls/C-0226-preferusingacontaineroptimizedoswhenpossible.json index 7090d5651..207e0fb65 100644 --- a/controls/C-0226-preferusingacontaineroptimizedoswhenpossible.json +++ b/controls/C-0226-preferusingacontaineroptimizedoswhenpossible.json @@ -10,7 +10,6 @@ "https://aws.amazon.com/bottlerocket/" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "alert-container-optimized-os-not-in-use" diff --git a/controls/C-0227-restrictaccesstothecontrolplaneendpoint.json b/controls/C-0227-restrictaccesstothecontrolplaneendpoint.json index d3f811ee0..1334d4e81 100644 --- a/controls/C-0227-restrictaccesstothecontrolplaneendpoint.json +++ b/controls/C-0227-restrictaccesstothecontrolplaneendpoint.json @@ -9,7 +9,6 @@ "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" ], "attributes": { - "armoBuiltin": true }, "rulesNames": ["ensure-endpointprivateaccess-is-enabled"], "baseScore": 8.0, diff --git a/controls/C-0228-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json b/controls/C-0228-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json index a761077d7..d5b56ad8f 100644 --- a/controls/C-0228-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json +++ b/controls/C-0228-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json @@ -9,7 +9,6 @@ "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" ], "attributes": { - "armoBuiltin": true }, "rulesNames": ["ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks"], "baseScore": 8.0, diff --git a/controls/C-0229-ensureclustersarecreatedwithprivatenodes.json b/controls/C-0229-ensureclustersarecreatedwithprivatenodes.json index 2385ad0a2..cc4a8ceb5 100644 --- a/controls/C-0229-ensureclustersarecreatedwithprivatenodes.json +++ b/controls/C-0229-ensureclustersarecreatedwithprivatenodes.json @@ -7,7 +7,6 @@ "manual_test": "", "references": [], "attributes": { - "armoBuiltin": true }, "rulesNames": ["ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks"], "baseScore": 8.0, diff --git a/controls/C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json b/controls/C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json index 14d578d16..08a99d846 100644 --- a/controls/C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json +++ b/controls/C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json @@ -7,7 +7,6 @@ "manual_test": "", "references": [], "attributes": { - "armoBuiltin": true }, "rulesNames": ["ensure-network-policy-is-enabled-eks"], "baseScore": 6.0, diff --git a/controls/C-0231-encrypttraffictohttpsloadbalancerswithtlscertificates.json b/controls/C-0231-encrypttraffictohttpsloadbalancerswithtlscertificates.json index d38d84530..b7ad51993 100644 --- a/controls/C-0231-encrypttraffictohttpsloadbalancerswithtlscertificates.json +++ b/controls/C-0231-encrypttraffictohttpsloadbalancerswithtlscertificates.json @@ -9,7 +9,6 @@ "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" ], "attributes": { - "armoBuiltin": true }, "rulesNames": ["ensure-https-loadbalancers-encrypted-with-tls-aws"], "baseScore": 5.0, diff --git a/controls/C-0232-managekubernetesrbacuserswithawsiamauthenticatorforkubernetesorupgradetoawscliv116156.json 
b/controls/C-0232-managekubernetesrbacuserswithawsiamauthenticatorforkubernetesorupgradetoawscliv116156.json index d09998ff6..c670f317f 100644 --- a/controls/C-0232-managekubernetesrbacuserswithawsiamauthenticatorforkubernetesorupgradetoawscliv116156.json +++ b/controls/C-0232-managekubernetesrbacuserswithawsiamauthenticatorforkubernetesorupgradetoawscliv116156.json @@ -10,7 +10,6 @@ "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "review-roles-with-aws-iam-authenticator" diff --git a/controls/C-0233-considerfargateforrunninguntrustedworkloads.json b/controls/C-0233-considerfargateforrunninguntrustedworkloads.json index 61e0d1910..12d6c2746 100644 --- a/controls/C-0233-considerfargateforrunninguntrustedworkloads.json +++ b/controls/C-0233-considerfargateforrunninguntrustedworkloads.json @@ -9,7 +9,6 @@ "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "alert-fargate-not-in-use" diff --git a/controls/C-0234-considerexternalsecretstorage.json b/controls/C-0234-considerexternalsecretstorage.json index 93551b69a..1f67bb928 100644 --- a/controls/C-0234-considerexternalsecretstorage.json +++ b/controls/C-0234-considerexternalsecretstorage.json @@ -7,7 +7,6 @@ "manual_test": "Review your secrets management implementation.", "references": [], "attributes": { - "armoBuiltin": true }, "rulesNames": ["ensure-external-secrets-storage-is-in-use"], "baseScore": 6.0, diff --git a/controls/C-0235-ensurethatthekubeletconfigurationfilehaspermissionssetto644ormorerestrictive.json b/controls/C-0235-ensurethatthekubeletconfigurationfilehaspermissionssetto644ormorerestrictive.json index c651e3ff7..a4b5deeb5 100644 --- a/controls/C-0235-ensurethatthekubeletconfigurationfilehaspermissionssetto644ormorerestrictive.json +++ b/controls/C-0235-ensurethatthekubeletconfigurationfilehaspermissionssetto644ormorerestrictive.json @@ -9,7 +9,6 @@ "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive" diff --git a/controls/C-0236-verifyimagesignature.json b/controls/C-0236-verifyimagesignature.json index c44ffa3eb..869329595 100644 --- a/controls/C-0236-verifyimagesignature.json +++ b/controls/C-0236-verifyimagesignature.json @@ -7,8 +7,7 @@ "manual_test": "", "references": [], "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true + "actionRequired": "configuration" }, "rulesNames": [ "verify-image-signature" diff --git a/controls/C-0237-hasimagesignature.json b/controls/C-0237-hasimagesignature.json index fde0b9557..7ebf3a0b9 100644 --- a/controls/C-0237-hasimagesignature.json +++ b/controls/C-0237-hasimagesignature.json @@ -7,7 +7,6 @@ "manual_test": "", "references": [], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "has-image-signature" diff --git a/controls/C-0238-ensurethatthekubeconfigfilepermissionsaresetto644ormorerestrictive.json b/controls/C-0238-ensurethatthekubeconfigfilepermissionsaresetto644ormorerestrictive.json index 9852350e2..579deb1fe 100644 --- a/controls/C-0238-ensurethatthekubeconfigfilepermissionsaresetto644ormorerestrictive.json +++ b/controls/C-0238-ensurethatthekubeconfigfilepermissionsaresetto644ormorerestrictive.json @@ -9,7 +9,6 @@ "https://kubernetes.io/docs/admin/kube-proxy/" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ 
"Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive" diff --git a/controls/C-0239-preferusingdedicatedaksserviceaccounts.json b/controls/C-0239-preferusingdedicatedaksserviceaccounts.json index a2159db1f..ecdb5c39c 100644 --- a/controls/C-0239-preferusingdedicatedaksserviceaccounts.json +++ b/controls/C-0239-preferusingdedicatedaksserviceaccounts.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-default-service-accounts-has-only-default-roles" diff --git a/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json b/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json index 5faee94ea..820ae79e6 100644 --- a/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json +++ b/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json @@ -9,7 +9,6 @@ "\n\n " ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-cni-enabled-aks" diff --git a/controls/C-0241-useazurerbacforkubernetesauthorization.json b/controls/C-0241-useazurerbacforkubernetesauthorization.json index c49ccdac6..cb266de34 100644 --- a/controls/C-0241-useazurerbacforkubernetesauthorization.json +++ b/controls/C-0241-useazurerbacforkubernetesauthorization.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": ["ensure-azure-rbac-is-set"], "baseScore": 7, diff --git a/controls/C-0242-hostilemultitenantworkloads.json b/controls/C-0242-hostilemultitenantworkloads.json index 62e98a3bb..a43c8afcb 100644 --- a/controls/C-0242-hostilemultitenantworkloads.json +++ b/controls/C-0242-hostilemultitenantworkloads.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-hostile-multitenant-workloads" diff --git a/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json b/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json index daa7c6d2e..e58ca3937 100644 --- a/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json +++ b/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json @@ -9,7 +9,6 @@ "\n\n \n\n " ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider" diff --git a/controls/C-0244-ensurekubernetessecretsareencrypted.json b/controls/C-0244-ensurekubernetessecretsareencrypted.json index 3bb263573..d164330f9 100644 --- a/controls/C-0244-ensurekubernetessecretsareencrypted.json +++ b/controls/C-0244-ensurekubernetessecretsareencrypted.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "secret-etcd-encryption-cloud" diff --git a/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json b/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json index d2c800b01..00739a944 100644 --- a/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json +++ b/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "encrypt-traffic-to-https-load-balancers-with-tls-certificates" diff --git a/controls/C-0246-avoiduseofsystemmastersgroup.json b/controls/C-0246-avoiduseofsystemmastersgroup.json index da7412a5a..396738b3a 100644 --- a/controls/C-0246-avoiduseofsystemmastersgroup.json +++ 
b/controls/C-0246-avoiduseofsystemmastersgroup.json @@ -9,7 +9,6 @@ "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-manual" diff --git a/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json b/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json index b9d730671..9dd307d28 100644 --- a/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json +++ b/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "restrict-access-to-the-control-plane-endpoint" diff --git a/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json b/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json index aca15749d..cd7d2bf39 100644 --- a/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json +++ b/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-clusters-are-created-with-private-nodes" diff --git a/controls/C-0249-restrictuntrustedworkloads.json b/controls/C-0249-restrictuntrustedworkloads.json index e2c5d3123..eb4c80d06 100644 --- a/controls/C-0249-restrictuntrustedworkloads.json +++ b/controls/C-0249-restrictuntrustedworkloads.json @@ -9,7 +9,6 @@ "\n\n \n\n " ], "attributes": { - "armoBuiltin": true, "actionRequired": "manual review" }, "rulesNames": [ diff --git a/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json b/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json index 674e4c2b6..e3c0779a1 100644 --- a/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json +++ b/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-service-principle-has-read-only-permissions" diff --git a/controls/C-0251-minimizeuseraccesstoazurecontainerregistryacr.json b/controls/C-0251-minimizeuseraccesstoazurecontainerregistryacr.json index a8d5af221..9029e5289 100644 --- a/controls/C-0251-minimizeuseraccesstoazurecontainerregistryacr.json +++ b/controls/C-0251-minimizeuseraccesstoazurecontainerregistryacr.json @@ -9,7 +9,6 @@ "" ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "list-role-definitions-in-acr" diff --git a/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json b/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json index 15d9a5c14..fc0e3e193 100644 --- a/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json +++ b/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json @@ -9,7 +9,6 @@ "\n\n " ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled" diff --git a/controls/C-0253-deprecated-k8s-registry.json b/controls/C-0253-deprecated-k8s-registry.json index 35fa98c76..d112dbf87 100644 --- a/controls/C-0253-deprecated-k8s-registry.json +++ b/controls/C-0253-deprecated-k8s-registry.json @@ -1,7 +1,6 @@ { "name": "Deprecated Kubernetes image registry", "attributes": { - "armoBuiltin": true }, "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry 
(registry.k8s.io). This is mandatory from 1.27", "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", diff --git a/controls/C-0254-enableauditlogs.json b/controls/C-0254-enableauditlogs.json index eb9ef642f..a35ec5daf 100644 --- a/controls/C-0254-enableauditlogs.json +++ b/controls/C-0254-enableauditlogs.json @@ -9,7 +9,6 @@ "\n\n \n\n " ], "attributes": { - "armoBuiltin": true }, "rulesNames": [ "rule-manual" diff --git a/controls/C-0255-workloadwithsecretaccess.json b/controls/C-0255-workloadwithsecretaccess.json index 686a72577..69a3b1264 100644 --- a/controls/C-0255-workloadwithsecretaccess.json +++ b/controls/C-0255-workloadwithsecretaccess.json @@ -1,7 +1,6 @@ { "name": "Workload with secret access", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0256-exposuretointernet.json b/controls/C-0256-exposuretointernet.json index a65f5b3f5..c82bd03c0 100644 --- a/controls/C-0256-exposuretointernet.json +++ b/controls/C-0256-exposuretointernet.json @@ -1,7 +1,6 @@ { "name": "Exposure to internet", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0257-pvcaccess.json b/controls/C-0257-pvcaccess.json index 19a1b77f7..2040ebd41 100644 --- a/controls/C-0257-pvcaccess.json +++ b/controls/C-0257-pvcaccess.json @@ -1,7 +1,6 @@ { "name": "Workload with PVC access", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0258-configmapaccess.json b/controls/C-0258-configmapaccess.json index 2b15ba4d7..492a85209 100644 --- a/controls/C-0258-configmapaccess.json +++ b/controls/C-0258-configmapaccess.json @@ -1,7 +1,6 @@ { "name": "Workload with ConfigMap access", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0259-workloadwithcredentialaccess.json b/controls/C-0259-workloadwithcredentialaccess.json index 9153afd41..99de6b5c8 100644 --- a/controls/C-0259-workloadwithcredentialaccess.json +++ b/controls/C-0259-workloadwithcredentialaccess.json @@ -1,7 +1,6 @@ { "name": "Workload with credential access", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0260-missingnetworkpolicy.json b/controls/C-0260-missingnetworkpolicy.json index f51ad8c42..1669d1c61 100644 --- a/controls/C-0260-missingnetworkpolicy.json +++ b/controls/C-0260-missingnetworkpolicy.json @@ -1,7 +1,6 @@ { "name": "Missing network policy", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0261-satokenmounted.json b/controls/C-0261-satokenmounted.json index 5e438e4af..aa15abcfb 100644 --- a/controls/C-0261-satokenmounted.json +++ b/controls/C-0261-satokenmounted.json @@ -1,7 +1,6 @@ { "name": "ServiceAccount token mounted", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], diff --git a/controls/C-0262-anonymousaccessisenabled.json b/controls/C-0262-anonymousaccessisenabled.json index acc295a63..c82021bd9 100644 --- a/controls/C-0262-anonymousaccessisenabled.json +++ b/controls/C-0262-anonymousaccessisenabled.json @@ -5,7 +5,6 @@ "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. 
Also checks in the apiserver if the --anonymous-auth flag is set to false", "attributes": { - "armoBuiltin": true }, "rulesNames": [ "anonymous-access-enabled" diff --git a/default-config-inputs.json b/default-config-inputs.json index c985e96bf..b2a748767 100644 --- a/default-config-inputs.json +++ b/default-config-inputs.json @@ -1,7 +1,6 @@ { "name": "default", "attributes": { - "armoBuiltin": true }, "scope": { "designatorType": "attributes", diff --git a/frameworks/__YAMLscan.json b/frameworks/__YAMLscan.json index 8c42e60b7..8508dcebc 100644 --- a/frameworks/__YAMLscan.json +++ b/frameworks/__YAMLscan.json @@ -2,7 +2,6 @@ "name": "YAML-scanning", "description": "Controls relevant to yamls", "attributes": { - "armoBuiltin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index 890e01015..f4d738ade 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -2,7 +2,6 @@ "name": "AllControls", "description": "Contains all the controls from all the frameworks", "attributes": { - "armoBuiltin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/armobest.json b/frameworks/armobest.json index 04c639036..7f1b62931 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -2,7 +2,6 @@ "name": "ArmoBest", "description": "", "attributes": { - "armoBuiltin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/cis-aks-t1.2.0.json b/frameworks/cis-aks-t1.2.0.json index 7eac30cec..65d785cce 100644 --- a/frameworks/cis-aks-t1.2.0.json +++ b/frameworks/cis-aks-t1.2.0.json @@ -2,8 +2,7 @@ "name": "cis-aks-t1.2.0", "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", "attributes": { - "version": "v1.2.0", - "armoBuiltin": true + "version": "v1.2.0" }, "scanningScope": { "matches": [ diff --git a/frameworks/cis-eks-t1.2.0.json b/frameworks/cis-eks-t1.2.0.json index 4dc1ffda5..7619b9f80 100644 --- a/frameworks/cis-eks-t1.2.0.json +++ b/frameworks/cis-eks-t1.2.0.json @@ -2,8 +2,7 @@ "name": "cis-eks-t1.2.0", "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", "attributes": { - "version": "v1.2.0", - "armoBuiltin": true + "version": "v1.2.0" }, "scanningScope": { "matches": [ diff --git a/frameworks/cis-v1.23-t1.0.1.json b/frameworks/cis-v1.23-t1.0.1.json index cfd884b5d..4cc6d0829 100644 --- a/frameworks/cis-v1.23-t1.0.1.json +++ b/frameworks/cis-v1.23-t1.0.1.json @@ -2,8 +2,7 @@ "name": "cis-v1.23-t1.0.1", "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", "attributes": { - "version": "v1.0.1", - "armoBuiltin": true + "version": "v1.0.1" }, "scanningScope": { "matches": [ diff --git a/frameworks/clusterscan.json b/frameworks/clusterscan.json index a97ab72ec..713fddf82 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -2,7 +2,6 @@ "name": "ClusterScan", "description": "Framework for scanning a cluster", "attributes": { - "armoBuiltin": true }, "typeTags": [ "security" diff --git a/frameworks/devopsbest.json b/frameworks/devopsbest.json index 55906992b..f663274b3 100644 --- a/frameworks/devopsbest.json +++ b/frameworks/devopsbest.json @@ -2,7 +2,6 @@ "name": "DevOpsBest", "description": "", "attributes": { - "armoBuiltin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/mitre.json 
b/frameworks/mitre.json index 460d794b3..00b8ff94e 100644 --- a/frameworks/mitre.json +++ b/frameworks/mitre.json @@ -2,7 +2,6 @@ "name": "MITRE", "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", "attributes": { - "armoBuiltin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/nsaframework.json b/frameworks/nsaframework.json index 3955df176..09da0b0cf 100644 --- a/frameworks/nsaframework.json +++ b/frameworks/nsaframework.json @@ -2,7 +2,6 @@ "name": "NSA", "description": "Implement NSA security advices for K8s ", "attributes": { - "armoBuiltin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/security.json b/frameworks/security.json index 6f9e23805..893c8c2bb 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -2,7 +2,6 @@ "name": "security", "description": "Controls that are used to assess security threats.", "attributes": { - "armoBuiltin": true }, "typeTags": [ "security" diff --git a/frameworks/workloadscan.json b/frameworks/workloadscan.json index c92f72ea3..74a031e9e 100644 --- a/frameworks/workloadscan.json +++ b/frameworks/workloadscan.json @@ -2,7 +2,6 @@ "name": "WorkloadScan", "description": "Framework for scanning a workload", "attributes": { - "armoBuiltin": true }, "typeTags": [ "security" diff --git a/gitregostore/gitstoreutils_test.go b/gitregostore/gitstoreutils_test.go index e88772faf..be8f73c7a 100644 --- a/gitregostore/gitstoreutils_test.go +++ b/gitregostore/gitstoreutils_test.go @@ -345,7 +345,7 @@ func TestSetControls(t *testing.T) { } // - respStr = `[{"name":"TEST","attributes":{"armoBuiltin":true,"controlTypeTags":["security","compliance"],"attackTracks":[{"attackTrack": "container","categories": ["Execution","Initial access"]},{"attackTrack": "network","categories": ["Eavesdropping","Spoofing"]}]},"description":"","remediation":"","rulesNames":["CVE-2022-0185"],"id":"C-0079","long_description":"","test":"","controlID":"C-0079","baseScore":4,"example":""}]` + respStr = `[{"name":"TEST","attributes":{"controlTypeTags":["security","compliance"],"attackTracks":[{"attackTrack": "container","categories": ["Execution","Initial access"]},{"attackTrack": "network","categories": ["Eavesdropping","Spoofing"]}]},"description":"","remediation":"","rulesNames":["CVE-2022-0185"],"id":"C-0079","long_description":"","test":"","controlID":"C-0079","baseScore":4,"example":""}]` err = store.setControls(respStr) if err != nil { t.Errorf("Error setting controls: %v", err) diff --git a/rules/CVE-2021-25741/rule.metadata.json b/rules/CVE-2021-25741/rule.metadata.json index 7ea036588..be106a659 100644 --- a/rules/CVE-2021-25741/rule.metadata.json +++ b/rules/CVE-2021-25741/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/CVE-2021-25742/rule.metadata.json b/rules/CVE-2021-25742/rule.metadata.json index 6ea23d990..567147708 100644 --- a/rules/CVE-2021-25742/rule.metadata.json +++ b/rules/CVE-2021-25742/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "nginx-ingress-snippet-annotation-vulnerability", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/CVE-2022-0185/rule.metadata.json b/rules/CVE-2022-0185/rule.metadata.json index a50004be9..8649f2f19 100644 --- a/rules/CVE-2022-0185/rule.metadata.json +++ b/rules/CVE-2022-0185/rule.metadata.json @@ -1,7 
+1,6 @@ { "name": "CVE-2022-0185", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/CVE-2022-0492/rule.metadata.json b/rules/CVE-2022-0492/rule.metadata.json index 58c423695..31e1d6b06 100644 --- a/rules/CVE-2022-0492/rule.metadata.json +++ b/rules/CVE-2022-0492/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-0492", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/CVE-2022-23648/rule.metadata.json b/rules/CVE-2022-23648/rule.metadata.json index 0413a3cce..52262a4d2 100644 --- a/rules/CVE-2022-23648/rule.metadata.json +++ b/rules/CVE-2022-23648/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-23648", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/CVE-2022-24348/rule.metadata.json b/rules/CVE-2022-24348/rule.metadata.json index 8597e6907..7024003a2 100644 --- a/rules/CVE-2022-24348/rule.metadata.json +++ b/rules/CVE-2022-24348/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-24348", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/CVE-2022-3172/rule.metadata.json b/rules/CVE-2022-3172/rule.metadata.json index 277ea2706..dd0333f8f 100644 --- a/rules/CVE-2022-3172/rule.metadata.json +++ b/rules/CVE-2022-3172/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-3172", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/CVE-2022-39328/rule.metadata.json b/rules/CVE-2022-39328/rule.metadata.json index a1c25f088..6db538630 100644 --- a/rules/CVE-2022-39328/rule.metadata.json +++ b/rules/CVE-2022-39328/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-39328", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/CVE-2022-47633/rule.metadata.json b/rules/CVE-2022-47633/rule.metadata.json index b314635cc..87bb0f2b4 100644 --- a/rules/CVE-2022-47633/rule.metadata.json +++ b/rules/CVE-2022-47633/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "CVE-2022-47633", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive/rule.metadata.json b/rules/Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive/rule.metadata.json index 869aad6c4..068a05601 100644 --- a/rules/Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive/rule.metadata.json +++ b/rules/Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/access-container-service-account-v1/rule.metadata.json b/rules/access-container-service-account-v1/rule.metadata.json index efe4e87c1..26f708f90 100644 --- a/rules/access-container-service-account-v1/rule.metadata.json +++ b/rules/access-container-service-account-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "access-container-service-account-v1", "attributes": { "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/access-container-service-account/rule.metadata.json 
b/rules/access-container-service-account/rule.metadata.json index d097827b9..c29218196 100644 --- a/rules/access-container-service-account/rule.metadata.json +++ b/rules/access-container-service-account/rule.metadata.json @@ -2,7 +2,6 @@ "name": "access-container-service-account", "attributes": { "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/alert-any-hostpath/rule.metadata.json b/rules/alert-any-hostpath/rule.metadata.json index e5d329d31..224cf4713 100644 --- a/rules/alert-any-hostpath/rule.metadata.json +++ b/rules/alert-any-hostpath/rule.metadata.json @@ -1,8 +1,7 @@ { "name": "alert-any-hostpath", "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", - "armoBuiltin": true + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/alert-container-optimized-os-not-in-use/rule.metadata.json b/rules/alert-container-optimized-os-not-in-use/rule.metadata.json index 72fd5da1c..e20724003 100644 --- a/rules/alert-container-optimized-os-not-in-use/rule.metadata.json +++ b/rules/alert-container-optimized-os-not-in-use/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "alert-container-optimized-os-not-in-use", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/alert-fargate-not-in-use/rule.metadata.json b/rules/alert-fargate-not-in-use/rule.metadata.json index f45b9dd12..9931ad4b5 100644 --- a/rules/alert-fargate-not-in-use/rule.metadata.json +++ b/rules/alert-fargate-not-in-use/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "alert-fargate-not-in-use", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/alert-mount-potential-credentials-paths/rule.metadata.json b/rules/alert-mount-potential-credentials-paths/rule.metadata.json index 10ea952cc..58c0eb9dc 100644 --- a/rules/alert-mount-potential-credentials-paths/rule.metadata.json +++ b/rules/alert-mount-potential-credentials-paths/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "alert-mount-potential-credentials-paths", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/alert-rw-hostpath/rule.metadata.json b/rules/alert-rw-hostpath/rule.metadata.json index c41eddc35..f502cdb97 100644 --- a/rules/alert-rw-hostpath/rule.metadata.json +++ b/rules/alert-rw-hostpath/rule.metadata.json @@ -1,8 +1,7 @@ { "name": "alert-rw-hostpath", "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", - "armoBuiltin": true + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/anonymous-access-enabled/rule.metadata.json b/rules/anonymous-access-enabled/rule.metadata.json index ed0b38635..8419cd25d 100644 --- a/rules/anonymous-access-enabled/rule.metadata.json +++ b/rules/anonymous-access-enabled/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "anonymous-access-enabled", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/anonymous-requests-to-kubelet-updated/rule.metadata.json b/rules/anonymous-requests-to-kubelet-updated/rule.metadata.json index dc9d88007..bb5f93163 100644 --- a/rules/anonymous-requests-to-kubelet-updated/rule.metadata.json +++ 
b/rules/anonymous-requests-to-kubelet-updated/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "anonymous-requests-to-kubelet-service-updated", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/audit-policy-content/rule.metadata.json b/rules/audit-policy-content/rule.metadata.json index b68e74cf5..ebbc8faa1 100644 --- a/rules/audit-policy-content/rule.metadata.json +++ b/rules/audit-policy-content/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "audit-policy-content", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/automount-default-service-account/rule.metadata.json b/rules/automount-default-service-account/rule.metadata.json index 5c9ed5ebc..492e39d30 100644 --- a/rules/automount-default-service-account/rule.metadata.json +++ b/rules/automount-default-service-account/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "automount-default-service-account", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/automount-service-account/rule.metadata.json b/rules/automount-service-account/rule.metadata.json index f007931a9..39cbac449 100644 --- a/rules/automount-service-account/rule.metadata.json +++ b/rules/automount-service-account/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "automount-service-account", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/cluster-admin-role/rule.metadata.json b/rules/cluster-admin-role/rule.metadata.json index aebce77d0..fccdec3f6 100644 --- a/rules/cluster-admin-role/rule.metadata.json +++ b/rules/cluster-admin-role/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "cluster-admin-role", "attributes": { - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/configmap-in-default-namespace/rule.metadata.json b/rules/configmap-in-default-namespace/rule.metadata.json index 100d36e58..9b1bd6cce 100644 --- a/rules/configmap-in-default-namespace/rule.metadata.json +++ b/rules/configmap-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "configmap-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/configured-liveness-probe/rule.metadata.json b/rules/configured-liveness-probe/rule.metadata.json index 6e2247163..fbc8c3449 100644 --- a/rules/configured-liveness-probe/rule.metadata.json +++ b/rules/configured-liveness-probe/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "configured-liveness-probe", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/configured-readiness-probe/rule.metadata.json b/rules/configured-readiness-probe/rule.metadata.json index ae6c96313..1b75c1ec4 100644 --- a/rules/configured-readiness-probe/rule.metadata.json +++ b/rules/configured-readiness-probe/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "configured-readiness-probe", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/container-hostPort/rule.metadata.json b/rules/container-hostPort/rule.metadata.json index 6f59ab962..3ee8ab535 100644 --- a/rules/container-hostPort/rule.metadata.json +++ b/rules/container-hostPort/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "container-hostPort", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/container-image-repository-v1/rule.metadata.json 
b/rules/container-image-repository-v1/rule.metadata.json index 292c8495d..22cba00f5 100644 --- a/rules/container-image-repository-v1/rule.metadata.json +++ b/rules/container-image-repository-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "container-image-repository-v1", "attributes": { "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true, "useFromKubescapeVersion": "v2.9.0" }, "ruleLanguage": "Rego", diff --git a/rules/container-image-repository/rule.metadata.json b/rules/container-image-repository/rule.metadata.json index 989bceb09..7be7bbf89 100644 --- a/rules/container-image-repository/rule.metadata.json +++ b/rules/container-image-repository/rule.metadata.json @@ -2,7 +2,6 @@ "name": "container-image-repository", "attributes": { "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true, "useUntilKubescapeVersion": "v2.3.8" }, "ruleLanguage": "Rego", diff --git a/rules/containers-mounting-docker-socket/rule.metadata.json b/rules/containers-mounting-docker-socket/rule.metadata.json index 283bef733..5187c46a3 100644 --- a/rules/containers-mounting-docker-socket/rule.metadata.json +++ b/rules/containers-mounting-docker-socket/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "containers-mounting-docker-socket", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/csistoragecapacity-in-default-namespace/rule.metadata.json b/rules/csistoragecapacity-in-default-namespace/rule.metadata.json index a1d360607..f23a1df7b 100644 --- a/rules/csistoragecapacity-in-default-namespace/rule.metadata.json +++ b/rules/csistoragecapacity-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "csistoragecapacity-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/drop-capability-netraw/rule.metadata.json b/rules/drop-capability-netraw/rule.metadata.json index d3eaba387..4f085e093 100644 --- a/rules/drop-capability-netraw/rule.metadata.json +++ b/rules/drop-capability-netraw/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "drop-capability-netraw", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/encrypt-traffic-to-https-load-balancers-with-tls-certificates/rule.metadata.json b/rules/encrypt-traffic-to-https-load-balancers-with-tls-certificates/rule.metadata.json index 342db3f52..e857b989d 100644 --- a/rules/encrypt-traffic-to-https-load-balancers-with-tls-certificates/rule.metadata.json +++ b/rules/encrypt-traffic-to-https-load-balancers-with-tls-certificates/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", "attributes": { - "armoBuiltin": true, "hostSensorRule": "false", "imageScanRelated": false }, diff --git a/rules/endpoints-in-default-namespace/rule.metadata.json b/rules/endpoints-in-default-namespace/rule.metadata.json index 16dd4da7b..974e90296 100644 --- a/rules/endpoints-in-default-namespace/rule.metadata.json +++ b/rules/endpoints-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "endpoints-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/endpointslice-in-default-namespace/rule.metadata.json b/rules/endpointslice-in-default-namespace/rule.metadata.json index 27d9b3ff7..a603c5dd2 100644 --- a/rules/endpointslice-in-default-namespace/rule.metadata.json +++ b/rules/endpointslice-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": 
"endpointslice-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/enforce-kubelet-client-tls-authentication-updated/rule.metadata.json b/rules/enforce-kubelet-client-tls-authentication-updated/rule.metadata.json index 27c330f5c..043e5e7b4 100644 --- a/rules/enforce-kubelet-client-tls-authentication-updated/rule.metadata.json +++ b/rules/enforce-kubelet-client-tls-authentication-updated/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "enforce-kubelet-client-tls-authentication-updated", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-aws-policies-are-present/rule.metadata.json b/rules/ensure-aws-policies-are-present/rule.metadata.json index 1b983e164..683738f18 100644 --- a/rules/ensure-aws-policies-are-present/rule.metadata.json +++ b/rules/ensure-aws-policies-are-present/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-aws-policies-are-present", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "dynamicMatch": [ diff --git a/rules/ensure-azure-rbac-is-set/rule.metadata.json b/rules/ensure-azure-rbac-is-set/rule.metadata.json index 6f7d105d7..4d87f1aff 100644 --- a/rules/ensure-azure-rbac-is-set/rule.metadata.json +++ b/rules/ensure-azure-rbac-is-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-azure-rbac-is-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "dynamicMatch": [ diff --git a/rules/ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled/rule.metadata.json b/rules/ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled/rule.metadata.json index 2662aa233..ffcee362b 100644 --- a/rules/ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled/rule.metadata.json +++ b/rules/ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", "attributes": { - "armoBuiltin": true, "hostSensorRule": "false", "imageScanRelated": false }, diff --git a/rules/ensure-clusters-are-created-with-private-nodes/rule.metadata.json b/rules/ensure-clusters-are-created-with-private-nodes/rule.metadata.json index ba7b9f467..b1144fc1d 100644 --- a/rules/ensure-clusters-are-created-with-private-nodes/rule.metadata.json +++ b/rules/ensure-clusters-are-created-with-private-nodes/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-clusters-are-created-with-private-nodes", "attributes": { - "armoBuiltin": true, "hostSensorRule": false, "imageScanRelated": false }, diff --git a/rules/ensure-default-service-accounts-has-only-default-roles/rule.metadata.json b/rules/ensure-default-service-accounts-has-only-default-roles/rule.metadata.json index 4549dbc8c..f1e2e4479 100644 --- a/rules/ensure-default-service-accounts-has-only-default-roles/rule.metadata.json +++ b/rules/ensure-default-service-accounts-has-only-default-roles/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-default-service-accounts-has-only-default-roles", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks/rule.metadata.json b/rules/ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks/rule.metadata.json index ebd0191aa..94b922269 100644 --- 
a/rules/ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks/rule.metadata.json +++ b/rules/ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-endpointprivateaccess-is-enabled/rule.metadata.json b/rules/ensure-endpointprivateaccess-is-enabled/rule.metadata.json index d1d52c640..9567cd3d0 100644 --- a/rules/ensure-endpointprivateaccess-is-enabled/rule.metadata.json +++ b/rules/ensure-endpointprivateaccess-is-enabled/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-endpointprivateaccess-is-enabled", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks/rule.metadata.json b/rules/ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks/rule.metadata.json index e622b7b89..d2763c36e 100644 --- a/rules/ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks/rule.metadata.json +++ b/rules/ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-external-secrets-storage-is-in-use/rule.metadata.json b/rules/ensure-external-secrets-storage-is-in-use/rule.metadata.json index 73782a4d3..a0bd55f38 100644 --- a/rules/ensure-external-secrets-storage-is-in-use/rule.metadata.json +++ b/rules/ensure-external-secrets-storage-is-in-use/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-external-secrets-storage-is-in-use", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-https-loadbalancers-encrypted-with-tls-aws/rule.metadata.json b/rules/ensure-https-loadbalancers-encrypted-with-tls-aws/rule.metadata.json index 4627b8fbb..7eb58db02 100644 --- a/rules/ensure-https-loadbalancers-encrypted-with-tls-aws/rule.metadata.json +++ b/rules/ensure-https-loadbalancers-encrypted-with-tls-aws/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-image-scanning-enabled-cloud/rule.metadata.json b/rules/ensure-image-scanning-enabled-cloud/rule.metadata.json index 2b9176412..7dd06667a 100644 --- a/rules/ensure-image-scanning-enabled-cloud/rule.metadata.json +++ b/rules/ensure-image-scanning-enabled-cloud/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-image-scanning-enabled-cloud", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "dynamicMatch": [ diff --git a/rules/ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider/rule.metadata.json b/rules/ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider/rule.metadata.json index 4b8e56923..0e03d0b57 100644 --- a/rules/ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider/rule.metadata.json +++ b/rules/ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider/rule.metadata.json @@ -1,7 +1,6 @@ { "name": 
"ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [], diff --git a/rules/ensure-network-policy-is-enabled-eks/rule.metadata.json b/rules/ensure-network-policy-is-enabled-eks/rule.metadata.json index c07665d3f..5d2b8b48e 100644 --- a/rules/ensure-network-policy-is-enabled-eks/rule.metadata.json +++ b/rules/ensure-network-policy-is-enabled-eks/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-network-policy-is-enabled-eks", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-service-principle-has-read-only-permissions/rule.metadata.json b/rules/ensure-service-principle-has-read-only-permissions/rule.metadata.json index 5d5729a0d..ac6639802 100644 --- a/rules/ensure-service-principle-has-read-only-permissions/rule.metadata.json +++ b/rules/ensure-service-principle-has-read-only-permissions/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-service-principle-has-read-only-permissions", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "dynamicMatch": [ diff --git a/rules/ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers/rule.metadata.json b/rules/ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers/rule.metadata.json index caff35020..15b2d4375 100644 --- a/rules/ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers/rule.metadata.json +++ b/rules/ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json index 5d02130ec..cc2cce906 100644 --- a/rules/ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index ed6c12993..1a53b0986 100644 --- a/rules/ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root/rule.metadata.json index de8a8c690..227fa639e 100644 --- 
a/rules/ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index 5738f87b0..48b4fefb3 100644 --- a/rules/ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index d9e48b420..a906903c3 100644 --- a/rules/ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root/rule.metadata.json index 4baafa603..9a5501ad2 100644 --- a/rules/ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600/rule.metadata.json b/rules/ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600/rule.metadata.json index 62a3c569d..38653ca77 100644 --- a/rules/ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600/rule.metadata.json +++ b/rules/ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-admin.conf-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-admin.conf-file-ownership-is-set-to-root-root/rule.metadata.json index f37b99149..31dd53c8b 100644 --- a/rules/ensure-that-the-admin.conf-file-ownership-is-set-to-root-root/rule.metadata.json +++ 
b/rules/ensure-that-the-admin.conf-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-admin.conf-file-permissions-are-set-to-600/rule.metadata.json b/rules/ensure-that-the-admin.conf-file-permissions-are-set-to-600/rule.metadata.json index de0d53a70..02ec7853d 100644 --- a/rules/ensure-that-the-admin.conf-file-permissions-are-set-to-600/rule.metadata.json +++ b/rules/ensure-that-the-admin.conf-file-permissions-are-set-to-600/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set/rule.metadata.json b/rules/ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set/rule.metadata.json index 763b6f081..c9beafa54 100644 --- a/rules/ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set/rule.metadata.json +++ b/rules/ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set/rule.metadata.json b/rules/ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set/rule.metadata.json index fcd22b13a..1cd351f20 100644 --- a/rules/ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set/rule.metadata.json +++ b/rules/ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-admission-control-plugin-EventRateLimit-is-set/rule.metadata.json b/rules/ensure-that-the-admission-control-plugin-EventRateLimit-is-set/rule.metadata.json index 849d0928c..03aa5b9e3 100644 --- a/rules/ensure-that-the-admission-control-plugin-EventRateLimit-is-set/rule.metadata.json +++ b/rules/ensure-that-the-admission-control-plugin-EventRateLimit-is-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set/rule.metadata.json b/rules/ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set/rule.metadata.json index 70bcbe44e..c1cb33ca1 100644 --- a/rules/ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set/rule.metadata.json +++ b/rules/ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-admission-control-plugin-NodeRestriction-is-set/rule.metadata.json b/rules/ensure-that-the-admission-control-plugin-NodeRestriction-is-set/rule.metadata.json index 37d3cb478..81f2dbdc4 100644 --- a/rules/ensure-that-the-admission-control-plugin-NodeRestriction-is-set/rule.metadata.json +++ 
b/rules/ensure-that-the-admission-control-plugin-NodeRestriction-is-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used/rule.metadata.json b/rules/ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used/rule.metadata.json index b518d505f..5bfb939d5 100644 --- a/rules/ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used/rule.metadata.json +++ b/rules/ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-admission-control-plugin-ServiceAccount-is-set/rule.metadata.json b/rules/ensure-that-the-admission-control-plugin-ServiceAccount-is-set/rule.metadata.json index cb03d9867..ab50edd93 100644 --- a/rules/ensure-that-the-admission-control-plugin-ServiceAccount-is-set/rule.metadata.json +++ b/rules/ensure-that-the-admission-control-plugin-ServiceAccount-is-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set/rule.metadata.json b/rules/ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set/rule.metadata.json index e73637bf8..92db6916b 100644 --- a/rules/ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set/rule.metadata.json +++ b/rules/ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false/rule.metadata.json b/rules/ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false/rule.metadata.json index f87800209..d49478d40 100644 --- a/rules/ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false/rule.metadata.json +++ b/rules/ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate/rule.metadata.json index dfeaa3032..ec6a9b66a 100644 --- a/rules/ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git 
a/rules/ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate/rule.metadata.json index 8b0774d09..c4f6839df 100644 --- a/rules/ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate/rule.metadata.json index f6b992b4f..76af08e90 100644 --- a/rules/ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true", "useFromKubescapeVersion": "v2.0.159" }, diff --git a/rules/ensure-that-the-api-server-audit-log-path-argument-is-set/rule.metadata.json b/rules/ensure-that-the-api-server-audit-log-path-argument-is-set/rule.metadata.json index 0382e6620..13fcd826e 100644 --- a/rules/ensure-that-the-api-server-audit-log-path-argument-is-set/rule.metadata.json +++ b/rules/ensure-that-the-api-server-audit-log-path-argument-is-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-authorization-mode-argument-includes-Node/rule.metadata.json b/rules/ensure-that-the-api-server-authorization-mode-argument-includes-Node/rule.metadata.json index e44b2db81..82715dd37 100644 --- a/rules/ensure-that-the-api-server-authorization-mode-argument-includes-Node/rule.metadata.json +++ b/rules/ensure-that-the-api-server-authorization-mode-argument-includes-Node/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-authorization-mode-argument-includes-RBAC/rule.metadata.json b/rules/ensure-that-the-api-server-authorization-mode-argument-includes-RBAC/rule.metadata.json index e5a1e7fe5..7992f4ee6 100644 --- a/rules/ensure-that-the-api-server-authorization-mode-argument-includes-RBAC/rule.metadata.json +++ b/rules/ensure-that-the-api-server-authorization-mode-argument-includes-RBAC/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow/rule.metadata.json b/rules/ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow/rule.metadata.json index a01f79941..2ecf5bcf1 100644 --- 
a/rules/ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow/rule.metadata.json +++ b/rules/ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate/rule.metadata.json index 769f90734..1d18ce2c6 100644 --- a/rules/ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate/rule.metadata.json index 3c6dd9599..4855e4658 100644 --- a/rules/ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-api-server-encryption-providers-are-appropriately-configured/rule.metadata.json b/rules/ensure-that-the-api-server-encryption-providers-are-appropriately-configured/rule.metadata.json index f0fa0f1a9..e067d4432 100644 --- a/rules/ensure-that-the-api-server-encryption-providers-are-appropriately-configured/rule.metadata.json +++ b/rules/ensure-that-the-api-server-encryption-providers-are-appropriately-configured/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate/rule.metadata.json index 3523b2dd4..9acd3ae47 100644 --- a/rules/ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate/rule.metadata.json index 01aa7a4f3..c366e6a1c 100644 --- a/rules/ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate/rule.metadata.json @@ 
-1,7 +1,6 @@ { "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate/rule.metadata.json index ff43cd65f..ec9a8a90b 100644 --- a/rules/ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate/rule.metadata.json index b8d92ac73..4793b934c 100644 --- a/rules/ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-profiling-argument-is-set-to-false/rule.metadata.json b/rules/ensure-that-the-api-server-profiling-argument-is-set-to-false/rule.metadata.json index 20b014cf1..f3308351c 100644 --- a/rules/ensure-that-the-api-server-profiling-argument-is-set-to-false/rule.metadata.json +++ b/rules/ensure-that-the-api-server-profiling-argument-is-set-to-false/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate/rule.metadata.json index f4602037e..1d276cd72 100644 --- a/rules/ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-secure-port-argument-is-not-set-to-0/rule.metadata.json b/rules/ensure-that-the-api-server-secure-port-argument-is-not-set-to-0/rule.metadata.json index 8ef4c297e..dff71b9f1 100644 --- a/rules/ensure-that-the-api-server-secure-port-argument-is-not-set-to-0/rule.metadata.json +++ b/rules/ensure-that-the-api-server-secure-port-argument-is-not-set-to-0/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git 
a/rules/ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate/rule.metadata.json index cf000a0dc..638501fd3 100644 --- a/rules/ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true", "useFromKubescapeVersion": "v2.0.159" }, diff --git a/rules/ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true/rule.metadata.json b/rules/ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true/rule.metadata.json index 06f782fb6..6c82785cb 100644 --- a/rules/ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true/rule.metadata.json +++ b/rules/ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate/rule.metadata.json index a16c24707..434a42c45 100644 --- a/rules/ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-api-server-token-auth-file-parameter-is-not-set/rule.metadata.json b/rules/ensure-that-the-api-server-token-auth-file-parameter-is-not-set/rule.metadata.json index 0e787d555..69f121fd8 100644 --- a/rules/ensure-that-the-api-server-token-auth-file-parameter-is-not-set/rule.metadata.json +++ b/rules/ensure-that-the-api-server-token-auth-file-parameter-is-not-set/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index e8e8658d5..ea7d3eccb 100644 --- a/rules/ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root/rule.metadata.json 
b/rules/ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root/rule.metadata.json index 44a650774..a0c5c7503 100644 --- a/rules/ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-cni-in-use-supports-network-policies/rule.metadata.json b/rules/ensure-that-the-cni-in-use-supports-network-policies/rule.metadata.json index df6a74c4a..8c5e119e7 100644 --- a/rules/ensure-that-the-cni-in-use-supports-network-policies/rule.metadata.json +++ b/rules/ensure-that-the-cni-in-use-supports-network-policies/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-cni-in-use-supports-network-policies", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true/rule.metadata.json b/rules/ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true/rule.metadata.json index 81fe89ea2..b863714b0 100644 --- a/rules/ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1/rule.metadata.json b/rules/ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1/rule.metadata.json index 323081f2f..cb50c6499 100644 --- a/rules/ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json index 89bacd267..132390ac7 100644 --- a/rules/ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index 6de688bf1..4681b2595 100644 --- 
a/rules/ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-controller-manager-profiling-argument-is-set-to-false/rule.metadata.json b/rules/ensure-that-the-controller-manager-profiling-argument-is-set-to-false/rule.metadata.json index c069d0684..d76782926 100644 --- a/rules/ensure-that-the-controller-manager-profiling-argument-is-set-to-false/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-profiling-argument-is-set-to-false/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate/rule.metadata.json index 6d6d7781f..ddbaff1b7 100644 --- a/rules/ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate/rule.metadata.json index 40979ff34..b2e0046cb 100644 --- a/rules/ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate/rule.metadata.json b/rules/ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate/rule.metadata.json index ddd3ae8f6..e16d539fc 100644 --- a/rules/ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true/rule.metadata.json b/rules/ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true/rule.metadata.json index e22505dc3..7c493cd83 100644 --- 
a/rules/ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root/rule.metadata.json index 30290ee67..abef3a875 100644 --- a/rules/ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index 195383ef6..c94de82dd 100644 --- a/rules/ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd/rule.metadata.json b/rules/ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd/rule.metadata.json index 95d9d480c..3178ffcd1 100644 --- a/rules/ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd/rule.metadata.json +++ b/rules/ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive/rule.metadata.json index 8906bf5e3..9d91e7c67 100644 --- a/rules/ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json index ef08a93b0..8617cc394 100644 --- a/rules/ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json +++ 
b/rules/ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index 76d7599d5..83127510f 100644 --- a/rules/ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root/rule.metadata.json index 3d22e8d5f..6374b36aa 100644 --- a/rules/ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index f00f4931f..0798c831c 100644 --- a/rules/ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive/rule.metadata.json index 5af700e25..f0fe4e6f4 100644 --- a/rules/ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root/rule.metadata.json index 0859e732a..418799c90 100644 --- a/rules/ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root/rule.metadata.json +++ 
b/rules/ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root/rule.metadata.json index fa87c6128..683695901 100644 --- a/rules/ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index fc627c5c8..0300b9474 100644 --- a/rules/ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1/rule.metadata.json b/rules/ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1/rule.metadata.json index 4a394e0ae..d0c7fe079 100644 --- a/rules/ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1/rule.metadata.json +++ b/rules/ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json index 0fc19891e..b26511559 100644 --- a/rules/ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index ac94be6df..071d70234 100644 --- a/rules/ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": 
"ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-scheduler-profiling-argument-is-set-to-false/rule.metadata.json b/rules/ensure-that-the-scheduler-profiling-argument-is-set-to-false/rule.metadata.json index 049b72786..474ca793e 100644 --- a/rules/ensure-that-the-scheduler-profiling-argument-is-set-to-false/rule.metadata.json +++ b/rules/ensure-that-the-scheduler-profiling-argument-is-set-to-false/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root/rule.metadata.json b/rules/ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root/rule.metadata.json index c678b3afc..44e2e1188 100644 --- a/rules/ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index 2cdb5e5fe..24d952319 100644 --- a/rules/ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/ensure_network_policy_configured_in_labels/rule.metadata.json b/rules/ensure_network_policy_configured_in_labels/rule.metadata.json index 64711b69e..3e72e46d7 100644 --- a/rules/ensure_network_policy_configured_in_labels/rule.metadata.json +++ b/rules/ensure_network_policy_configured_in_labels/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure_network_policy_configured_in_labels", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ensure_nodeinstancerole_has_right_permissions_for_ecr/rule.metadata.json b/rules/ensure_nodeinstancerole_has_right_permissions_for_ecr/rule.metadata.json index ad8ff0292..e7f4b1f26 100644 --- a/rules/ensure_nodeinstancerole_has_right_permissions_for_ecr/rule.metadata.json +++ b/rules/ensure_nodeinstancerole_has_right_permissions_for_ecr/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", "attributes": { - "armoBuiltin": true, "useFromKubescapeVersion": "v2.2.5" }, "ruleLanguage": "Rego", diff --git a/rules/etcd-auto-tls-disabled/rule.metadata.json b/rules/etcd-auto-tls-disabled/rule.metadata.json index be0dbf59e..e1c47da82 100644 --- a/rules/etcd-auto-tls-disabled/rule.metadata.json +++ b/rules/etcd-auto-tls-disabled/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "etcd-auto-tls-disabled", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/etcd-client-auth-cert/rule.metadata.json 
b/rules/etcd-client-auth-cert/rule.metadata.json index b71384cd5..58664390c 100644 --- a/rules/etcd-client-auth-cert/rule.metadata.json +++ b/rules/etcd-client-auth-cert/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "etcd-client-auth-cert", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/etcd-encryption-native/rule.metadata.json b/rules/etcd-encryption-native/rule.metadata.json index 7b18a6acd..abadf0ac7 100644 --- a/rules/etcd-encryption-native/rule.metadata.json +++ b/rules/etcd-encryption-native/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "etcd-encryption-native", "attributes": { - "armoBuiltin": true, "resourcesAggregator": "apiserver-pod", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/etcd-peer-auto-tls-disabled/rule.metadata.json b/rules/etcd-peer-auto-tls-disabled/rule.metadata.json index c220accbd..a99ea9024 100644 --- a/rules/etcd-peer-auto-tls-disabled/rule.metadata.json +++ b/rules/etcd-peer-auto-tls-disabled/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "etcd-peer-auto-tls-disabled", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/etcd-peer-client-auth-cert/rule.metadata.json b/rules/etcd-peer-client-auth-cert/rule.metadata.json index ed751e622..cc63ad8bc 100644 --- a/rules/etcd-peer-client-auth-cert/rule.metadata.json +++ b/rules/etcd-peer-client-auth-cert/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "etcd-peer-client-auth-cert", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/etcd-peer-tls-enabled/rule.metadata.json b/rules/etcd-peer-tls-enabled/rule.metadata.json index bbfa1861d..eeb9c791c 100644 --- a/rules/etcd-peer-tls-enabled/rule.metadata.json +++ b/rules/etcd-peer-tls-enabled/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "etcd-peer-tls-enabled", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/etcd-tls-enabled/rule.metadata.json b/rules/etcd-tls-enabled/rule.metadata.json index f4b91589d..8a368da6d 100644 --- a/rules/etcd-tls-enabled/rule.metadata.json +++ b/rules/etcd-tls-enabled/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "etcd-tls-enabled", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/etcd-unique-ca/rule.metadata.json b/rules/etcd-unique-ca/rule.metadata.json index a830c6d99..7104327b8 100644 --- a/rules/etcd-unique-ca/rule.metadata.json +++ b/rules/etcd-unique-ca/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "etcd-unique-ca", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/excessive_amount_of_vulnerabilities_pods/rule.metadata.json b/rules/excessive_amount_of_vulnerabilities_pods/rule.metadata.json index f72603726..c5482dc31 100644 --- a/rules/excessive_amount_of_vulnerabilities_pods/rule.metadata.json +++ b/rules/excessive_amount_of_vulnerabilities_pods/rule.metadata.json @@ -2,7 +2,6 @@ "name": "excessive_amount_of_vulnerabilities_pods", "attributes": { "microsoftK8sThreatMatrix": "Initial access::Exposed critical vulnerable pods", - "armoBuiltin": true, "useFromKubescapeVersion": "v1.0.133", "imageScanRelated": true }, diff --git a/rules/exec-into-container-v1/rule.metadata.json b/rules/exec-into-container-v1/rule.metadata.json index 66f49c77b..b0da940e9 100644 --- a/rules/exec-into-container-v1/rule.metadata.json +++ b/rules/exec-into-container-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "exec-into-container-v1", "attributes": { "m$K8sThreatMatrix": "Privilege 
Escalation::Exec into container", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/exec-into-container/rule.metadata.json b/rules/exec-into-container/rule.metadata.json index 02b57ced7..17e089973 100644 --- a/rules/exec-into-container/rule.metadata.json +++ b/rules/exec-into-container/rule.metadata.json @@ -2,7 +2,6 @@ "name": "exec-into-container", "attributes": { "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/exposed-critical-pods/rule.metadata.json b/rules/exposed-critical-pods/rule.metadata.json index 0015fbb1d..d163b1fb2 100644 --- a/rules/exposed-critical-pods/rule.metadata.json +++ b/rules/exposed-critical-pods/rule.metadata.json @@ -2,7 +2,6 @@ "name": "exposed-critical-pods", "attributes": { "m$K8sThreatMatrix": "exposed-critical-pods", - "armoBuiltin": true, "imageScanRelated": true }, "ruleLanguage": "Rego", diff --git a/rules/exposed-rce-pods/rule.metadata.json b/rules/exposed-rce-pods/rule.metadata.json index 98f9c922d..f19277015 100644 --- a/rules/exposed-rce-pods/rule.metadata.json +++ b/rules/exposed-rce-pods/rule.metadata.json @@ -2,7 +2,6 @@ "name": "exposed-rce-pods", "attributes": { "m$K8sThreatMatrix": "exposed-rce-pods", - "armoBuiltin": true, "useFromKubescapeVersion": "v2.0.150", "imageScanRelated": true }, diff --git a/rules/exposed-sensitive-interfaces-v1/rule.metadata.json b/rules/exposed-sensitive-interfaces-v1/rule.metadata.json index 17a5910c0..93f5503ae 100644 --- a/rules/exposed-sensitive-interfaces-v1/rule.metadata.json +++ b/rules/exposed-sensitive-interfaces-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "exposed-sensitive-interfaces-v1", "attributes": { "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, "useFromKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/exposed-sensitive-interfaces/rule.metadata.json b/rules/exposed-sensitive-interfaces/rule.metadata.json index 2f479e9f2..47b74abb7 100644 --- a/rules/exposed-sensitive-interfaces/rule.metadata.json +++ b/rules/exposed-sensitive-interfaces/rule.metadata.json @@ -2,7 +2,6 @@ "name": "exposed-sensitive-interfaces", "attributes": { "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/exposure-to-internet/rule.metadata.json b/rules/exposure-to-internet/rule.metadata.json index 139c3018a..d1357ee94 100644 --- a/rules/exposure-to-internet/rule.metadata.json +++ b/rules/exposure-to-internet/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "exposure-to-internet", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/external-secret-storage/rule.metadata.json b/rules/external-secret-storage/rule.metadata.json index 3285e7250..1701b3ebf 100644 --- a/rules/external-secret-storage/rule.metadata.json +++ b/rules/external-secret-storage/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "external-secret-storage", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/has-image-signature/rule.metadata.json b/rules/has-image-signature/rule.metadata.json index 8dbde72cd..def0f70ab 100644 --- a/rules/has-image-signature/rule.metadata.json +++ b/rules/has-image-signature/rule.metadata.json @@ -1,7 +1,6 @@ { "name": 
"has-image-signature", "attributes": { - "armoBuiltin": true, "useFromKubescapeVersion": "v2.1.3" }, "ruleLanguage": "Rego", diff --git a/rules/horizontalpodautoscaler-in-default-namespace/rule.metadata.json b/rules/horizontalpodautoscaler-in-default-namespace/rule.metadata.json index fbdcb2e65..c738ef369 100644 --- a/rules/horizontalpodautoscaler-in-default-namespace/rule.metadata.json +++ b/rules/horizontalpodautoscaler-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "horizontalpodautoscaler-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/host-network-access/rule.metadata.json b/rules/host-network-access/rule.metadata.json index 8a2d0e9b0..bc1dbb921 100644 --- a/rules/host-network-access/rule.metadata.json +++ b/rules/host-network-access/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "host-network-access", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/host-pid-ipc-privileges/rule.metadata.json b/rules/host-pid-ipc-privileges/rule.metadata.json index f6522dbd6..5a695c371 100644 --- a/rules/host-pid-ipc-privileges/rule.metadata.json +++ b/rules/host-pid-ipc-privileges/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "host-pid-ipc-privileges", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root/rule.metadata.json b/rules/if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root/rule.metadata.json index b5d05588d..0b2fc6ae1 100644 --- a/rules/if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root/rule.metadata.json +++ b/rules/if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json b/rules/if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json index 84c54aed3..7cd151707 100644 --- a/rules/if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive/rule.metadata.json b/rules/if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive/rule.metadata.json index 38eade5a4..f8847ea77 100644 --- a/rules/if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive/rule.metadata.json +++ b/rules/if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff 
--git a/rules/image-pull-policy-is-not-set-to-always/rule.metadata.json b/rules/image-pull-policy-is-not-set-to-always/rule.metadata.json index cbcb2a069..a67332d2c 100644 --- a/rules/image-pull-policy-is-not-set-to-always/rule.metadata.json +++ b/rules/image-pull-policy-is-not-set-to-always/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "image-pull-policy-is-not-set-to-always", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/immutable-container-filesystem/rule.metadata.json b/rules/immutable-container-filesystem/rule.metadata.json index a70b05ebe..e54bc2588 100644 --- a/rules/immutable-container-filesystem/rule.metadata.json +++ b/rules/immutable-container-filesystem/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "immutable-container-filesystem", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ingress-and-egress-blocked/rule.metadata.json b/rules/ingress-and-egress-blocked/rule.metadata.json index 9f046a281..f986a5af6 100644 --- a/rules/ingress-and-egress-blocked/rule.metadata.json +++ b/rules/ingress-and-egress-blocked/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ingress-and-egress-blocked", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/ingress-in-default-namespace/rule.metadata.json b/rules/ingress-in-default-namespace/rule.metadata.json index 6fdce98a8..6980ce5ce 100644 --- a/rules/ingress-in-default-namespace/rule.metadata.json +++ b/rules/ingress-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "ingress-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/insecure-capabilities/rule.metadata.json b/rules/insecure-capabilities/rule.metadata.json index 7b1f0160b..e85783cef 100644 --- a/rules/insecure-capabilities/rule.metadata.json +++ b/rules/insecure-capabilities/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "insecure-capabilities", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/insecure-port-flag/rule.metadata.json b/rules/insecure-port-flag/rule.metadata.json index e3d78ea4c..d4ec397eb 100644 --- a/rules/insecure-port-flag/rule.metadata.json +++ b/rules/insecure-port-flag/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "insecure-port-flag", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/instance-metadata-api-access/rule.metadata.json b/rules/instance-metadata-api-access/rule.metadata.json index b07f44bf1..6792a26fd 100644 --- a/rules/instance-metadata-api-access/rule.metadata.json +++ b/rules/instance-metadata-api-access/rule.metadata.json @@ -2,7 +2,6 @@ "name": "instance-metadata-api-access", "attributes": { "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/internal-networking/rule.metadata.json b/rules/internal-networking/rule.metadata.json index f5d845bd6..cff78c7fc 100644 --- a/rules/internal-networking/rule.metadata.json +++ b/rules/internal-networking/rule.metadata.json @@ -1,8 +1,7 @@ { "name": "internal-networking", "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/k8s-audit-logs-enabled-cloud/rule.metadata.json 
b/rules/k8s-audit-logs-enabled-cloud/rule.metadata.json index 21e8d3709..53bc6c5fd 100644 --- a/rules/k8s-audit-logs-enabled-cloud/rule.metadata.json +++ b/rules/k8s-audit-logs-enabled-cloud/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "k8s-audit-logs-enabled-cloud", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/k8s-audit-logs-enabled-native-cis/rule.metadata.json b/rules/k8s-audit-logs-enabled-native-cis/rule.metadata.json index 460ac0966..3e16d2a36 100644 --- a/rules/k8s-audit-logs-enabled-native-cis/rule.metadata.json +++ b/rules/k8s-audit-logs-enabled-native-cis/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "k8s-audit-logs-enabled-native-cis", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/k8s-audit-logs-enabled-native/rule.metadata.json b/rules/k8s-audit-logs-enabled-native/rule.metadata.json index c33ea2d02..9969a5a21 100644 --- a/rules/k8s-audit-logs-enabled-native/rule.metadata.json +++ b/rules/k8s-audit-logs-enabled-native/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "k8s-audit-logs-enabled-native", "attributes": { - "armoBuiltin": true, "resourcesAggregator": "apiserver-pod", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/k8s-common-labels-usage/rule.metadata.json b/rules/k8s-common-labels-usage/rule.metadata.json index fdf0b6bc4..e62f86480 100644 --- a/rules/k8s-common-labels-usage/rule.metadata.json +++ b/rules/k8s-common-labels-usage/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "k8s-common-labels-usage", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/kubelet-authorization-mode-alwaysAllow/rule.metadata.json b/rules/kubelet-authorization-mode-alwaysAllow/rule.metadata.json index 1d744f0cd..b7a8e20d2 100644 --- a/rules/kubelet-authorization-mode-alwaysAllow/rule.metadata.json +++ b/rules/kubelet-authorization-mode-alwaysAllow/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "kubelet-authorization-mode-alwaysAllow", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/kubelet-event-qps/rule.metadata.json b/rules/kubelet-event-qps/rule.metadata.json index f4a351bd6..a28e99e20 100644 --- a/rules/kubelet-event-qps/rule.metadata.json +++ b/rules/kubelet-event-qps/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "kubelet-event-qps", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/kubelet-hostname-override/rule.metadata.json b/rules/kubelet-hostname-override/rule.metadata.json index cb0a7d528..ef39fcb34 100644 --- a/rules/kubelet-hostname-override/rule.metadata.json +++ b/rules/kubelet-hostname-override/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "kubelet-hostname-override", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/kubelet-ip-tables/rule.metadata.json b/rules/kubelet-ip-tables/rule.metadata.json index 7b097aa36..3d9582871 100644 --- a/rules/kubelet-ip-tables/rule.metadata.json +++ b/rules/kubelet-ip-tables/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "kubelet-ip-tables", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/kubelet-protect-kernel-defaults/rule.metadata.json b/rules/kubelet-protect-kernel-defaults/rule.metadata.json index 63d98e80d..885771946 100644 --- a/rules/kubelet-protect-kernel-defaults/rule.metadata.json +++ b/rules/kubelet-protect-kernel-defaults/rule.metadata.json @@ -1,7 
+1,6 @@ { "name": "kubelet-protect-kernel-defaults", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/kubelet-rotate-certificates/rule.metadata.json b/rules/kubelet-rotate-certificates/rule.metadata.json index b09a0256b..3efe50cbf 100644 --- a/rules/kubelet-rotate-certificates/rule.metadata.json +++ b/rules/kubelet-rotate-certificates/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "kubelet-rotate-certificates", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/kubelet-rotate-kubelet-server-certificate/rule.metadata.json b/rules/kubelet-rotate-kubelet-server-certificate/rule.metadata.json index 1627f6966..912c3d558 100644 --- a/rules/kubelet-rotate-kubelet-server-certificate/rule.metadata.json +++ b/rules/kubelet-rotate-kubelet-server-certificate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "kubelet-rotate-kubelet-server-certificate", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/kubelet-streaming-connection-idle-timeout/rule.metadata.json b/rules/kubelet-streaming-connection-idle-timeout/rule.metadata.json index d85135e69..49f631623 100644 --- a/rules/kubelet-streaming-connection-idle-timeout/rule.metadata.json +++ b/rules/kubelet-streaming-connection-idle-timeout/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "kubelet-streaming-connection-idle-timeout", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/kubelet-strong-cryptography-ciphers/rule.metadata.json b/rules/kubelet-strong-cryptography-ciphers/rule.metadata.json index b5572a5cf..cbf24c136 100644 --- a/rules/kubelet-strong-cryptography-ciphers/rule.metadata.json +++ b/rules/kubelet-strong-cryptography-ciphers/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "kubelet-strong-cryptographics-ciphers", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/label-usage-for-resources/rule.metadata.json b/rules/label-usage-for-resources/rule.metadata.json index bbcc42e18..e8bd45b4e 100644 --- a/rules/label-usage-for-resources/rule.metadata.json +++ b/rules/label-usage-for-resources/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "label-usage-for-resources", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/lease-in-default-namespace/rule.metadata.json b/rules/lease-in-default-namespace/rule.metadata.json index 6366b597e..4513d3645 100644 --- a/rules/lease-in-default-namespace/rule.metadata.json +++ b/rules/lease-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "lease-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/linux-hardening/rule.metadata.json b/rules/linux-hardening/rule.metadata.json index e04eb97df..4de9f6920 100644 --- a/rules/linux-hardening/rule.metadata.json +++ b/rules/linux-hardening/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "linux-hardening", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/list-all-mutating-webhooks/rule.metadata.json b/rules/list-all-mutating-webhooks/rule.metadata.json index f2fed196a..ee3f697dc 100644 --- a/rules/list-all-mutating-webhooks/rule.metadata.json +++ b/rules/list-all-mutating-webhooks/rule.metadata.json @@ -1,8 +1,7 @@ { "name": "list-all-mutating-webhooks", "attributes": { - "m$K8sThreatMatrix": "Persistence::Validate admission 
controller", - "armoBuiltin": true + "m$K8sThreatMatrix": "Persistence::Validate admission controller" }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/list-all-namespaces/rule.metadata.json b/rules/list-all-namespaces/rule.metadata.json index 3a09df65e..be6f7dd3d 100644 --- a/rules/list-all-namespaces/rule.metadata.json +++ b/rules/list-all-namespaces/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "list-all-namespaces", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/list-all-validating-webhooks/rule.metadata.json b/rules/list-all-validating-webhooks/rule.metadata.json index d14d41057..f0973e277 100644 --- a/rules/list-all-validating-webhooks/rule.metadata.json +++ b/rules/list-all-validating-webhooks/rule.metadata.json @@ -1,8 +1,7 @@ { "name": "list-all-validating-webhooks", "attributes": { - "m$K8sThreatMatrix": "Credential Access::Validate admission controller", - "armoBuiltin": true + "m$K8sThreatMatrix": "Credential Access::Validate admission controller" }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/list-role-definitions-in-acr/rule.metadata.json b/rules/list-role-definitions-in-acr/rule.metadata.json index 618f868b8..bf558370a 100644 --- a/rules/list-role-definitions-in-acr/rule.metadata.json +++ b/rules/list-role-definitions-in-acr/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "list-role-definitions-in-acr", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "dynamicMatch": [ diff --git a/rules/naked-pods/rule.metadata.json b/rules/naked-pods/rule.metadata.json index 4049345bf..317152ca7 100644 --- a/rules/naked-pods/rule.metadata.json +++ b/rules/naked-pods/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "naked-pods", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/namespace-without-service-account/rule.metadata.json b/rules/namespace-without-service-account/rule.metadata.json index caebdf371..954acb5d0 100644 --- a/rules/namespace-without-service-account/rule.metadata.json +++ b/rules/namespace-without-service-account/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "namespace-without-service-account", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/non-root-containers/rule.metadata.json b/rules/non-root-containers/rule.metadata.json index 7e3a644f9..e43d260cb 100644 --- a/rules/non-root-containers/rule.metadata.json +++ b/rules/non-root-containers/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "non-root-containers", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/persistentvolumeclaim-in-default-namespace/rule.metadata.json b/rules/persistentvolumeclaim-in-default-namespace/rule.metadata.json index aa95aa8cb..8ac640583 100644 --- a/rules/persistentvolumeclaim-in-default-namespace/rule.metadata.json +++ b/rules/persistentvolumeclaim-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "persistentvolumeclaim-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/pod-security-admission-applied-1/rule.metadata.json b/rules/pod-security-admission-applied-1/rule.metadata.json index 73bfc5ac3..41f2762c3 100644 --- a/rules/pod-security-admission-applied-1/rule.metadata.json +++ b/rules/pod-security-admission-applied-1/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "pod-security-admission-applied-1", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git 
a/rules/pod-security-admission-applied-2/rule.metadata.json b/rules/pod-security-admission-applied-2/rule.metadata.json index cd339c400..1361e9b1d 100644 --- a/rules/pod-security-admission-applied-2/rule.metadata.json +++ b/rules/pod-security-admission-applied-2/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "pod-security-admission-applied-2", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/pod-security-admission-baseline-applied-1/rule.metadata.json b/rules/pod-security-admission-baseline-applied-1/rule.metadata.json index 017d627d5..1227691ba 100644 --- a/rules/pod-security-admission-baseline-applied-1/rule.metadata.json +++ b/rules/pod-security-admission-baseline-applied-1/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "pod-security-admission-baseline-applied-1", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/pod-security-admission-baseline-applied-2/rule.metadata.json b/rules/pod-security-admission-baseline-applied-2/rule.metadata.json index c6585ec4b..e9ec115f9 100644 --- a/rules/pod-security-admission-baseline-applied-2/rule.metadata.json +++ b/rules/pod-security-admission-baseline-applied-2/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "pod-security-admission-baseline-applied-2", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/pod-security-admission-restricted-applied-1/rule.metadata.json b/rules/pod-security-admission-restricted-applied-1/rule.metadata.json index 1d7d13acb..8b1204ad3 100644 --- a/rules/pod-security-admission-restricted-applied-1/rule.metadata.json +++ b/rules/pod-security-admission-restricted-applied-1/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "pod-security-admission-restricted-applied-1", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/pod-security-admission-restricted-applied-2/rule.metadata.json b/rules/pod-security-admission-restricted-applied-2/rule.metadata.json index ae004d1e1..70c8aaaa8 100644 --- a/rules/pod-security-admission-restricted-applied-2/rule.metadata.json +++ b/rules/pod-security-admission-restricted-applied-2/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "pod-security-admission-restricted-applied-2", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/poddisruptionbudget-in-default-namespace/rule.metadata.json b/rules/poddisruptionbudget-in-default-namespace/rule.metadata.json index f1842b843..e8dd28ec9 100644 --- a/rules/poddisruptionbudget-in-default-namespace/rule.metadata.json +++ b/rules/poddisruptionbudget-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "poddisruptionbudget-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/pods-in-default-namespace/rule.metadata.json b/rules/pods-in-default-namespace/rule.metadata.json index 0a4793b8a..d60f709f5 100644 --- a/rules/pods-in-default-namespace/rule.metadata.json +++ b/rules/pods-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "pods-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/podtemplate-in-default-namespace/rule.metadata.json b/rules/podtemplate-in-default-namespace/rule.metadata.json index af0137234..95e52a409 100644 --- a/rules/podtemplate-in-default-namespace/rule.metadata.json +++ b/rules/podtemplate-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": 
"podtemplate-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-deny-allowed-capabilities/rule.metadata.json b/rules/psp-deny-allowed-capabilities/rule.metadata.json index 703a24c02..7512e8c08 100644 --- a/rules/psp-deny-allowed-capabilities/rule.metadata.json +++ b/rules/psp-deny-allowed-capabilities/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-deny-allowed-capabilities", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-deny-allowprivilegeescalation/rule.metadata.json b/rules/psp-deny-allowprivilegeescalation/rule.metadata.json index 0130c1cb7..4e558e338 100644 --- a/rules/psp-deny-allowprivilegeescalation/rule.metadata.json +++ b/rules/psp-deny-allowprivilegeescalation/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-deny-allowprivilegeescalation", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-deny-hostipc/rule.metadata.json b/rules/psp-deny-hostipc/rule.metadata.json index b40d771e1..08e649706 100644 --- a/rules/psp-deny-hostipc/rule.metadata.json +++ b/rules/psp-deny-hostipc/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-deny-hostipc", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-deny-hostnetwork/rule.metadata.json b/rules/psp-deny-hostnetwork/rule.metadata.json index c73ed47f2..3940d038f 100644 --- a/rules/psp-deny-hostnetwork/rule.metadata.json +++ b/rules/psp-deny-hostnetwork/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-deny-hostnetwork", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-deny-hostpid/rule.metadata.json b/rules/psp-deny-hostpid/rule.metadata.json index 09c49ce67..6913cc1dd 100644 --- a/rules/psp-deny-hostpid/rule.metadata.json +++ b/rules/psp-deny-hostpid/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-deny-hostpid", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-deny-privileged-container/rule.metadata.json b/rules/psp-deny-privileged-container/rule.metadata.json index 3a5bb8a6b..3da2bf6cb 100644 --- a/rules/psp-deny-privileged-container/rule.metadata.json +++ b/rules/psp-deny-privileged-container/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-deny-privileged-container", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-deny-root-container/rule.metadata.json b/rules/psp-deny-root-container/rule.metadata.json index e93b72109..d968bb14a 100644 --- a/rules/psp-deny-root-container/rule.metadata.json +++ b/rules/psp-deny-root-container/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-deny-root-container", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-enabled-cloud/rule.metadata.json b/rules/psp-enabled-cloud/rule.metadata.json index 69bd842f8..fdd126f0f 100644 --- a/rules/psp-enabled-cloud/rule.metadata.json +++ b/rules/psp-enabled-cloud/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-enabled-cloud", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/psp-enabled-native/rule.metadata.json b/rules/psp-enabled-native/rule.metadata.json index e4935133d..9b8c1fe5b 100644 --- a/rules/psp-enabled-native/rule.metadata.json +++ b/rules/psp-enabled-native/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-enabled-native", "attributes": { - "armoBuiltin": true, "resourcesAggregator": 
"apiserver-pod", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/psp-required-drop-capabilities/rule.metadata.json b/rules/psp-required-drop-capabilities/rule.metadata.json index c4dc3964f..42b92bdb9 100644 --- a/rules/psp-required-drop-capabilities/rule.metadata.json +++ b/rules/psp-required-drop-capabilities/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "psp-required-drop-capabilities", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rbac-enabled-cloud/rule.metadata.json b/rules/rbac-enabled-cloud/rule.metadata.json index a787f6d9c..ef10ac751 100644 --- a/rules/rbac-enabled-cloud/rule.metadata.json +++ b/rules/rbac-enabled-cloud/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rbac-enabled-cloud", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rbac-enabled-native/rule.metadata.json b/rules/rbac-enabled-native/rule.metadata.json index 7c8b1e447..f90f3b852 100644 --- a/rules/rbac-enabled-native/rule.metadata.json +++ b/rules/rbac-enabled-native/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rbac-enabled-native", "attributes": { - "armoBuiltin": true, "resourcesAggregator": "apiserver-pod", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/read-only-port-enabled-updated/rule.metadata.json b/rules/read-only-port-enabled-updated/rule.metadata.json index 375539cca..62867daf6 100644 --- a/rules/read-only-port-enabled-updated/rule.metadata.json +++ b/rules/read-only-port-enabled-updated/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "read-only-port-enabled-updated", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/replicationcontroller-in-default-namespace/rule.metadata.json b/rules/replicationcontroller-in-default-namespace/rule.metadata.json index e4be5ff2e..1ccbb7fdb 100644 --- a/rules/replicationcontroller-in-default-namespace/rule.metadata.json +++ b/rules/replicationcontroller-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "replicationcontroller-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/resource-policies/rule.metadata.json b/rules/resource-policies/rule.metadata.json index bd1d1e82d..fd045110e 100644 --- a/rules/resource-policies/rule.metadata.json +++ b/rules/resource-policies/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "resource-policies", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/resources-cpu-limit-and-request/rule.metadata.json b/rules/resources-cpu-limit-and-request/rule.metadata.json index 26fda2dfe..6ece8ff98 100644 --- a/rules/resources-cpu-limit-and-request/rule.metadata.json +++ b/rules/resources-cpu-limit-and-request/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "resources-cpu-limit-and-request", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/resources-memory-limit-and-request/rule.metadata.json b/rules/resources-memory-limit-and-request/rule.metadata.json index 8505c889b..4813e61b3 100644 --- a/rules/resources-memory-limit-and-request/rule.metadata.json +++ b/rules/resources-memory-limit-and-request/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "resources-memory-limit-and-request", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/resources-secret-in-default-namespace/rule.metadata.json b/rules/resources-secret-in-default-namespace/rule.metadata.json index 
1ee07168d..8f91c04d1 100644 --- a/rules/resources-secret-in-default-namespace/rule.metadata.json +++ b/rules/resources-secret-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "resources-secret-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/restrict-access-to-the-control-plane-endpoint/rule.metadata.json b/rules/restrict-access-to-the-control-plane-endpoint/rule.metadata.json index 03184e004..876ad9532 100644 --- a/rules/restrict-access-to-the-control-plane-endpoint/rule.metadata.json +++ b/rules/restrict-access-to-the-control-plane-endpoint/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "restrict-access-to-the-control-plane-endpoint", "attributes": { - "armoBuiltin": true, "hostSensorRule": "false", "imageScanRelated": false }, diff --git a/rules/review-roles-with-aws-iam-authenticator/rule.metadata.json b/rules/review-roles-with-aws-iam-authenticator/rule.metadata.json index ebc0ee4d8..4a8f6ed9d 100644 --- a/rules/review-roles-with-aws-iam-authenticator/rule.metadata.json +++ b/rules/review-roles-with-aws-iam-authenticator/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "review-roles-with-aws-iam-authenticator", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/role-in-default-namespace/rule.metadata.json b/rules/role-in-default-namespace/rule.metadata.json index 68590ff63..64d738baf 100644 --- a/rules/role-in-default-namespace/rule.metadata.json +++ b/rules/role-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "role-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rolebinding-in-default-namespace/rule.metadata.json b/rules/rolebinding-in-default-namespace/rule.metadata.json index 2e3ae4185..09fa0ad0c 100644 --- a/rules/rolebinding-in-default-namespace/rule.metadata.json +++ b/rules/rolebinding-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rolebinding-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rule-access-dashboard-subject-v1/rule.metadata.json b/rules/rule-access-dashboard-subject-v1/rule.metadata.json index 9201de9a8..1008a5bb0 100644 --- a/rules/rule-access-dashboard-subject-v1/rule.metadata.json +++ b/rules/rule-access-dashboard-subject-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-access-dashboard-subject-v1", "attributes": { "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-access-dashboard-wl-v1/rule.metadata.json b/rules/rule-access-dashboard-wl-v1/rule.metadata.json index 7f91b502a..f51c33a3f 100644 --- a/rules/rule-access-dashboard-wl-v1/rule.metadata.json +++ b/rules/rule-access-dashboard-wl-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-access-dashboard-wl-v1", "attributes": { "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, "useFromKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-access-dashboard/rule.metadata.json b/rules/rule-access-dashboard/rule.metadata.json index 9c5b34a68..5378febee 100644 --- a/rules/rule-access-dashboard/rule.metadata.json +++ b/rules/rule-access-dashboard/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-access-dashboard", 
"attributes": { "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-allow-privilege-escalation/rule.metadata.json b/rules/rule-allow-privilege-escalation/rule.metadata.json index 0d18f46af..93cebb4d3 100644 --- a/rules/rule-allow-privilege-escalation/rule.metadata.json +++ b/rules/rule-allow-privilege-escalation/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rule-allow-privilege-escalation", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rule-can-bind-escalate/rule.metadata.json b/rules/rule-can-bind-escalate/rule.metadata.json index dcbc9e19f..a69cce772 100644 --- a/rules/rule-can-bind-escalate/rule.metadata.json +++ b/rules/rule-can-bind-escalate/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rule-can-bind-escalate", "attributes": { - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-can-create-pod/rule.metadata.json b/rules/rule-can-create-pod/rule.metadata.json index 9527c346e..a8866e00c 100644 --- a/rules/rule-can-create-pod/rule.metadata.json +++ b/rules/rule-can-create-pod/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rule-can-create-pod", "attributes": { - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-can-delete-k8s-events-v1/rule.metadata.json b/rules/rule-can-delete-k8s-events-v1/rule.metadata.json index 533373171..fc24a7372 100644 --- a/rules/rule-can-delete-k8s-events-v1/rule.metadata.json +++ b/rules/rule-can-delete-k8s-events-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-delete-k8s-events-v1", "attributes": { "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-can-delete-k8s-events/rule.metadata.json b/rules/rule-can-delete-k8s-events/rule.metadata.json index 4f3cf14b0..797eb1ea8 100644 --- a/rules/rule-can-delete-k8s-events/rule.metadata.json +++ b/rules/rule-can-delete-k8s-events/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-delete-k8s-events", "attributes": { "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-can-impersonate-users-groups-v1/rule.metadata.json b/rules/rule-can-impersonate-users-groups-v1/rule.metadata.json index 2e7483c5e..49d368545 100644 --- a/rules/rule-can-impersonate-users-groups-v1/rule.metadata.json +++ b/rules/rule-can-impersonate-users-groups-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-impersonate-users-groups-v1", "attributes": { "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-can-impersonate-users-groups/rule.metadata.json b/rules/rule-can-impersonate-users-groups/rule.metadata.json index 67dfa8481..88ba455c0 100644 --- a/rules/rule-can-impersonate-users-groups/rule.metadata.json +++ b/rules/rule-can-impersonate-users-groups/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-impersonate-users-groups", "attributes": { "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - 
"armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-can-list-get-secrets-v1/rule.metadata.json b/rules/rule-can-list-get-secrets-v1/rule.metadata.json index c0fd2011c..982613f2e 100644 --- a/rules/rule-can-list-get-secrets-v1/rule.metadata.json +++ b/rules/rule-can-list-get-secrets-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-list-get-secrets-v1", "attributes": { "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-can-list-get-secrets/rule.metadata.json b/rules/rule-can-list-get-secrets/rule.metadata.json index bc30fbc08..a6824d60f 100644 --- a/rules/rule-can-list-get-secrets/rule.metadata.json +++ b/rules/rule-can-list-get-secrets/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-list-get-secrets", "attributes": { "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-can-portforward-v1/rule.metadata.json b/rules/rule-can-portforward-v1/rule.metadata.json index 68e075d5e..9e192d476 100644 --- a/rules/rule-can-portforward-v1/rule.metadata.json +++ b/rules/rule-can-portforward-v1/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rule-can-portforward-v1", "attributes": { - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-can-portforward/rule.metadata.json b/rules/rule-can-portforward/rule.metadata.json index 03b928883..6fe58a54e 100644 --- a/rules/rule-can-portforward/rule.metadata.json +++ b/rules/rule-can-portforward/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rule-can-portforward", "attributes": { - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-can-ssh-to-pod-v1/rule.metadata.json b/rules/rule-can-ssh-to-pod-v1/rule.metadata.json index a30a839c4..db4d5f691 100644 --- a/rules/rule-can-ssh-to-pod-v1/rule.metadata.json +++ b/rules/rule-can-ssh-to-pod-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-ssh-to-pod-v1", "attributes": { "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, "useFromKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-can-ssh-to-pod/rule.metadata.json b/rules/rule-can-ssh-to-pod/rule.metadata.json index af170a8f3..5d2cc8311 100644 --- a/rules/rule-can-ssh-to-pod/rule.metadata.json +++ b/rules/rule-can-ssh-to-pod/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-ssh-to-pod", "attributes": { "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-can-update-configmap-v1/rule.metadata.json b/rules/rule-can-update-configmap-v1/rule.metadata.json index 083f78b7c..18ec03fbe 100644 --- a/rules/rule-can-update-configmap-v1/rule.metadata.json +++ b/rules/rule-can-update-configmap-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-update-configmap-v1", "attributes": { "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-can-update-configmap/rule.metadata.json b/rules/rule-can-update-configmap/rule.metadata.json 
index 7ae9f1e5f..bfd5344e4 100644 --- a/rules/rule-can-update-configmap/rule.metadata.json +++ b/rules/rule-can-update-configmap/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-can-update-configmap", "attributes": { "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-cni-enabled-aks/rule.metadata.json b/rules/rule-cni-enabled-aks/rule.metadata.json index c47a98f31..cefcfe8e4 100644 --- a/rules/rule-cni-enabled-aks/rule.metadata.json +++ b/rules/rule-cni-enabled-aks/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rule-cni-enabled-aks", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "dynamicMatch": [ diff --git a/rules/rule-credentials-configmap/rule.metadata.json b/rules/rule-credentials-configmap/rule.metadata.json index cbd03fb57..8091db54e 100644 --- a/rules/rule-credentials-configmap/rule.metadata.json +++ b/rules/rule-credentials-configmap/rule.metadata.json @@ -1,8 +1,7 @@ { "name": "rule-credentials-configmap", "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rule-credentials-in-env-var/rule.metadata.json b/rules/rule-credentials-in-env-var/rule.metadata.json index d5735acfb..8a52cefa1 100644 --- a/rules/rule-credentials-in-env-var/rule.metadata.json +++ b/rules/rule-credentials-in-env-var/rule.metadata.json @@ -1,8 +1,7 @@ { "name": "rule-credentials-in-env-var", "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rule-deny-cronjobs/rule.metadata.json b/rules/rule-deny-cronjobs/rule.metadata.json index 0b8250ab2..c53c1d7b1 100644 --- a/rules/rule-deny-cronjobs/rule.metadata.json +++ b/rules/rule-deny-cronjobs/rule.metadata.json @@ -1,8 +1,7 @@ { "name": "rule-deny-cronjobs", "attributes": { - "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob", - "armoBuiltin": true + "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob" }, "ruleLanguage": "rego", "match": [ diff --git a/rules/rule-excessive-delete-rights-v1/rule.metadata.json b/rules/rule-excessive-delete-rights-v1/rule.metadata.json index 6f86e94b0..14138cc0e 100644 --- a/rules/rule-excessive-delete-rights-v1/rule.metadata.json +++ b/rules/rule-excessive-delete-rights-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-excessive-delete-rights-v1", "attributes": { "m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-excessive-delete-rights/rule.metadata.json b/rules/rule-excessive-delete-rights/rule.metadata.json index 937e53927..dc33d12d8 100644 --- a/rules/rule-excessive-delete-rights/rule.metadata.json +++ b/rules/rule-excessive-delete-rights/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-excessive-delete-rights", "attributes": { 
"m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-hostile-multitenant-workloads/rule.metadata.json b/rules/rule-hostile-multitenant-workloads/rule.metadata.json index ab991b994..efc913134 100644 --- a/rules/rule-hostile-multitenant-workloads/rule.metadata.json +++ b/rules/rule-hostile-multitenant-workloads/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rule-hostile-multitenant-workloads", "attributes": { - "armoBuiltin": true, "actionRequired": "manual review" }, "ruleLanguage": "Rego", diff --git a/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json b/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json index cd97c0ded..5a3221ad4 100644 --- a/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json +++ b/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-identify-blocklisted-image-registries-v1", "attributes": { "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true, "useFromKubescapeVersion": "v2.9.0" }, "ruleLanguage": "Rego", diff --git a/rules/rule-identify-blocklisted-image-registries/rule.metadata.json b/rules/rule-identify-blocklisted-image-registries/rule.metadata.json index e52b4765f..7537a8bc1 100644 --- a/rules/rule-identify-blocklisted-image-registries/rule.metadata.json +++ b/rules/rule-identify-blocklisted-image-registries/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-identify-blocklisted-image-registries", "attributes": { "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true, "useUntilKubescapeVersion": "v2.3.8" }, "ruleLanguage": "Rego", diff --git a/rules/rule-identify-old-k8s-registry/rule.metadata.json b/rules/rule-identify-old-k8s-registry/rule.metadata.json index 985f67387..569e22f9d 100644 --- a/rules/rule-identify-old-k8s-registry/rule.metadata.json +++ b/rules/rule-identify-old-k8s-registry/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-identify-old-k8s-registry", "attributes": { "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rule-list-all-cluster-admins-v1/rule.metadata.json b/rules/rule-list-all-cluster-admins-v1/rule.metadata.json index 1bd7d2fb7..7b325dc97 100644 --- a/rules/rule-list-all-cluster-admins-v1/rule.metadata.json +++ b/rules/rule-list-all-cluster-admins-v1/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-list-all-cluster-admins-v1", "attributes": { "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, diff --git a/rules/rule-list-all-cluster-admins/rule.metadata.json b/rules/rule-list-all-cluster-admins/rule.metadata.json index 54777a54f..3cf574f7e 100644 --- a/rules/rule-list-all-cluster-admins/rule.metadata.json +++ b/rules/rule-list-all-cluster-admins/rule.metadata.json @@ -2,7 +2,6 @@ "name": "rule-list-all-cluster-admins", "attributes": { "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, "useUntilKubescapeVersion": "v1.0.133" }, "ruleLanguage": "Rego", diff --git a/rules/rule-manual/rule.metadata.json b/rules/rule-manual/rule.metadata.json index fa9f407a5..8dd9fecb5 100644 --- a/rules/rule-manual/rule.metadata.json +++ b/rules/rule-manual/rule.metadata.json @@ -1,7 +1,6 @@ { "name": 
"rule-manual", "attributes": { - "armoBuiltin": true, "actionRequired": "manual review", "hostSensorRule": false, "imageScanRelated": false diff --git a/rules/rule-privileged-container/rule.metadata.json b/rules/rule-privileged-container/rule.metadata.json index 1cca4e5b6..02fabb95d 100644 --- a/rules/rule-privileged-container/rule.metadata.json +++ b/rules/rule-privileged-container/rule.metadata.json @@ -4,7 +4,6 @@ "m$K8sThreatMatrix": "Privilege Escalation::privileged container", "mitre": "Privilege Escalation", "mitreCode": "TA0004", - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rule-secrets-in-env-var/rule.metadata.json b/rules/rule-secrets-in-env-var/rule.metadata.json index 13dd42259..4d0f371f4 100644 --- a/rules/rule-secrets-in-env-var/rule.metadata.json +++ b/rules/rule-secrets-in-env-var/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "rule-secrets-in-env-var", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/secret-etcd-encryption-cloud/rule.metadata.json b/rules/secret-etcd-encryption-cloud/rule.metadata.json index 3284d55bb..62c09f478 100644 --- a/rules/secret-etcd-encryption-cloud/rule.metadata.json +++ b/rules/secret-etcd-encryption-cloud/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "secret-etcd-encryption-cloud", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/service-in-default-namespace/rule.metadata.json b/rules/service-in-default-namespace/rule.metadata.json index a7704d894..35b0db8b6 100644 --- a/rules/service-in-default-namespace/rule.metadata.json +++ b/rules/service-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "service-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/serviceaccount-in-default-namespace/rule.metadata.json b/rules/serviceaccount-in-default-namespace/rule.metadata.json index 1bb661408..b9aeeca92 100644 --- a/rules/serviceaccount-in-default-namespace/rule.metadata.json +++ b/rules/serviceaccount-in-default-namespace/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "serviceaccount-in-default-namespace", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/serviceaccount-token-mount/rule.metadata.json b/rules/serviceaccount-token-mount/rule.metadata.json index c10e5c9f2..ccd71ddb5 100644 --- a/rules/serviceaccount-token-mount/rule.metadata.json +++ b/rules/serviceaccount-token-mount/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "serviceaccount-token-mount", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/set-fsgroup-value/rule.metadata.json b/rules/set-fsgroup-value/rule.metadata.json index 7d87dabe6..9273da89b 100644 --- a/rules/set-fsgroup-value/rule.metadata.json +++ b/rules/set-fsgroup-value/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "set-fsgroup-value", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/set-fsgroupchangepolicy-value/rule.metadata.json b/rules/set-fsgroupchangepolicy-value/rule.metadata.json index 1ccae4723..6838ceb4a 100644 --- a/rules/set-fsgroupchangepolicy-value/rule.metadata.json +++ b/rules/set-fsgroupchangepolicy-value/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "set-fsgroupchangepolicy-value", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/set-procmount-default/rule.metadata.json b/rules/set-procmount-default/rule.metadata.json index 
33aa94924..7131eaf96 100644 --- a/rules/set-procmount-default/rule.metadata.json +++ b/rules/set-procmount-default/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "set-procmount-default", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/set-seLinuxOptions/rule.metadata.json b/rules/set-seLinuxOptions/rule.metadata.json index 6ad114530..0b2cd07a0 100644 --- a/rules/set-seLinuxOptions/rule.metadata.json +++ b/rules/set-seLinuxOptions/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "set-seLinuxOptions", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/set-seccomp-profile-RuntimeDefault/rule.metadata.json b/rules/set-seccomp-profile-RuntimeDefault/rule.metadata.json index 77e921ca9..4bc785163 100644 --- a/rules/set-seccomp-profile-RuntimeDefault/rule.metadata.json +++ b/rules/set-seccomp-profile-RuntimeDefault/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "set-seccomp-profile-RuntimeDefault", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/set-seccomp-profile/rule.metadata.json b/rules/set-seccomp-profile/rule.metadata.json index a40733f05..90a5665dd 100644 --- a/rules/set-seccomp-profile/rule.metadata.json +++ b/rules/set-seccomp-profile/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "set-seccomp-profile", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/set-supplementalgroups-values/rule.metadata.json b/rules/set-supplementalgroups-values/rule.metadata.json index dfa1c6f8d..00f487e9a 100644 --- a/rules/set-supplementalgroups-values/rule.metadata.json +++ b/rules/set-supplementalgroups-values/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "set-supplementalgroups-values", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/set-sysctls-params/rule.metadata.json b/rules/set-sysctls-params/rule.metadata.json index 3fbba969a..82b5ea73e 100644 --- a/rules/set-sysctls-params/rule.metadata.json +++ b/rules/set-sysctls-params/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "set-sysctls-params", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/sudo-in-container-entrypoint/rule.metadata.json b/rules/sudo-in-container-entrypoint/rule.metadata.json index 2167c56c6..8f11108a8 100644 --- a/rules/sudo-in-container-entrypoint/rule.metadata.json +++ b/rules/sudo-in-container-entrypoint/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "sudo-in-container-entrypoint", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/validate-kubelet-tls-configuration-updated/rule.metadata.json b/rules/validate-kubelet-tls-configuration-updated/rule.metadata.json index 5aefd730a..187ad5317 100644 --- a/rules/validate-kubelet-tls-configuration-updated/rule.metadata.json +++ b/rules/validate-kubelet-tls-configuration-updated/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "validate-kubelet-tls-configuration-updated", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "ruleLanguage": "Rego", diff --git a/rules/verify-image-signature/rule.metadata.json b/rules/verify-image-signature/rule.metadata.json index 486bab218..3abb3180c 100644 --- a/rules/verify-image-signature/rule.metadata.json +++ b/rules/verify-image-signature/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "verify-image-signature", "attributes": { - "armoBuiltin": true, "useFromKubescapeVersion": "v2.1.3" }, "ruleLanguage": "Rego", diff --git 
a/rules/workload-mounted-configmap/rule.metadata.json b/rules/workload-mounted-configmap/rule.metadata.json index acef14c9c..1a685ed22 100644 --- a/rules/workload-mounted-configmap/rule.metadata.json +++ b/rules/workload-mounted-configmap/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "workload-mounted-configmap", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/workload-mounted-pvc/rule.metadata.json b/rules/workload-mounted-pvc/rule.metadata.json index dc22b0543..28b3e7e35 100644 --- a/rules/workload-mounted-pvc/rule.metadata.json +++ b/rules/workload-mounted-pvc/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "workload-mounted-pvc", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/workload-mounted-secrets/rule.metadata.json b/rules/workload-mounted-secrets/rule.metadata.json index 8f561c805..0c97783b6 100644 --- a/rules/workload-mounted-secrets/rule.metadata.json +++ b/rules/workload-mounted-secrets/rule.metadata.json @@ -1,7 +1,6 @@ { "name": "workload-mounted-secrets", "attributes": { - "armoBuiltin": true }, "ruleLanguage": "Rego", "match": [ diff --git a/scripts/init-rule.py b/scripts/init-rule.py index 86a0d9480..279e6b9aa 100644 --- a/scripts/init-rule.py +++ b/scripts/init-rule.py @@ -54,7 +54,6 @@ rule_metadata = """{{ "name": "{rule_name}", "attributes": {{ - "armoBuiltin": true,{use_from_kubescape_version}{use_until_kubescape_version} "hostSensorRule": "{host_sensor_rule}", "imageScanRelated": {image_scan_related} }}, From 9bed529a8777c08cacb448fcb7f7c4a2c6d54ada Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Thu, 23 Nov 2023 15:55:03 +0200 Subject: [PATCH 039/195] modified: .github/workflows/pr-tests.yaml --- .github/workflows/pr-tests.yaml | 40 +++++++++------------------------ 1 file changed, 10 insertions(+), 30 deletions(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index b507ea281..4a6c1f82e 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -3,10 +3,7 @@ on: push: branches: [ master, main ] pull_request: - # run for every chnage in the PR types: [ opened, synchronize, reopened, ready_for_review ] - # Do not run the pipeline if only Markdown files changed - # paths-ignore: ['**.md'] concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -15,43 +12,27 @@ concurrency: env: REGO_ARTIFACT_KEY_NAME: rego_artifact REGO_ARTIFACT_PATH: releaseDev + GH_ACCESS_TOKEN: ${{ secrets.ARMOSEC_GITHUB_ACCESS_TOKEN }} jobs: # testing link checks markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - - name: Check links - uses: gaurav-nelson/github-action-markdown-link-check@5c5dfc0ac2e225883c0e5f03a85311ec2830d368 + - uses: actions/checkout@v4 with: - use-verbose-mode: 'yes' + token: ${{ env.GH_ACCESS_TOKEN }} # main job of testing and building the env. test_pr_checks: - # needs: [markdown-link-check] permissions: pull-requests: write uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main with: - GO_VERSION: 1.19 + GO_VERSION: 1.20 BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... 
secrets: inherit -# test-coverage: -# needs: [test_pr_checks] -# uses: kubescape/workflows/.github/workflows/coverage-check.yaml@main -# if: | -# ${{ (always() && -# (contains(needs.*.result, 'success')) && -# !(contains(needs.*.result, 'skipped')) && -# !(contains(needs.*.result, 'failure')) && -# !(contains(needs.*.result, 'cancelled'))) }} -# with: -# COVERAGELIMIT: "58" -# SHA: ${{ github.sha }} - - build-and-rego-test: name: Build and test rego artifacts runs-on: ubuntu-latest @@ -61,21 +42,20 @@ jobs: !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }} - # needs: [test_pr_checks] outputs: REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} steps: - - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f + - uses: actions/checkout@v4 name: checkout repo content with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + token: ${{ env.GH_ACCESS_TOKEN }} # Test using Golang OPA hot rule compilation - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 + uses: actions/setup-go@v4 with: - go-version: 1.19 + go-version: 1.20 # testing rego library - name: Test Regoes @@ -159,9 +139,9 @@ jobs: runs-on: ubuntu-latest needs: [ks-and-rego-test] steps: - - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f + - uses: actions/checkout@v4 name: checkout repo content with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + token: ${{ env.GH_ACCESS_TOKEN }} - name: Remove pre-release folder run: rm -r -f pre-release From 707380dc320537d8833b21a77518a0c358c658b6 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Thu, 23 Nov 2023 15:59:04 +0200 Subject: [PATCH 040/195] Update pr-tests.yaml Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- .github/workflows/pr-tests.yaml | 50 ++++++++++----------------------- 1 file changed, 15 insertions(+), 35 deletions(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index ff440c71c..4a6c1f82e 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -3,10 +3,7 @@ on: push: branches: [ master, main ] pull_request: - # run for every chnage in the PR types: [ opened, synchronize, reopened, ready_for_review ] - # Do not run the pipeline if only Markdown files changed - # paths-ignore: ['**.md'] concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -15,43 +12,27 @@ concurrency: env: REGO_ARTIFACT_KEY_NAME: rego_artifact REGO_ARTIFACT_PATH: releaseDev + GH_ACCESS_TOKEN: ${{ secrets.ARMOSEC_GITHUB_ACCESS_TOKEN }} jobs: - # # testing link checks - # markdown-link-check: - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - # - name: Check links - # uses: gaurav-nelson/github-action-markdown-link-check@5c5dfc0ac2e225883c0e5f03a85311ec2830d368 - # with: - # use-verbose-mode: 'yes' + # testing link checks + markdown-link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + token: ${{ env.GH_ACCESS_TOKEN }} # main job of testing and building the env. test_pr_checks: - # needs: [markdown-link-check] permissions: pull-requests: write uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main with: - GO_VERSION: 1.19 + GO_VERSION: 1.20 BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... 
secrets: inherit -# test-coverage: -# needs: [test_pr_checks] -# uses: kubescape/workflows/.github/workflows/coverage-check.yaml@main -# if: | -# ${{ (always() && -# (contains(needs.*.result, 'success')) && -# !(contains(needs.*.result, 'skipped')) && -# !(contains(needs.*.result, 'failure')) && -# !(contains(needs.*.result, 'cancelled'))) }} -# with: -# COVERAGELIMIT: "58" -# SHA: ${{ github.sha }} - - build-and-rego-test: name: Build and test rego artifacts runs-on: ubuntu-latest @@ -61,21 +42,20 @@ jobs: !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }} - # needs: [test_pr_checks] outputs: REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} steps: - - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f + - uses: actions/checkout@v4 name: checkout repo content with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + token: ${{ env.GH_ACCESS_TOKEN }} # Test using Golang OPA hot rule compilation - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 + uses: actions/setup-go@v4 with: - go-version: 1.19 + go-version: 1.20 # testing rego library - name: Test Regoes @@ -159,9 +139,9 @@ jobs: runs-on: ubuntu-latest needs: [ks-and-rego-test] steps: - - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f + - uses: actions/checkout@v4 name: checkout repo content with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + token: ${{ env.GH_ACCESS_TOKEN }} - name: Remove pre-release folder run: rm -r -f pre-release From 9d7e87291839f3f2f39bff38206e7e8f7f175b6c Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Sun, 26 Nov 2023 10:13:52 +0200 Subject: [PATCH 041/195] fix for the pr --- .github/workflows/pr-tests.yaml | 24 ++---------------------- 1 file changed, 2 insertions(+), 22 deletions(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 13a315d7f..6ec1d4b52 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -15,28 +15,12 @@ env: GH_ACCESS_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} jobs: - # testing link checks - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - token: ${{ env.GH_ACCESS_TOKEN }} - # testing link checks - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - token: ${{ env.GH_ACCESS_TOKEN }} - # main job of testing and building the env. test_pr_checks: permissions: pull-requests: write uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main with: - GO_VERSION: 1.20 GO_VERSION: 1.20 BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... 
secrets: inherit @@ -59,21 +43,18 @@ jobs: name: checkout repo content with: token: ${{ env.GH_ACCESS_TOKEN }} - token: ${{ env.GH_ACCESS_TOKEN }} # Test using Golang OPA hot rule compilation - name: Set up Go - uses: actions/setup-go@v4 uses: actions/setup-go@v4 with: go-version: 1.20 - go-version: 1.20 # testing rego library - name: Test Regoes working-directory: testrunner run: | - apt update && apt install -y cmake + apt update && apt install -y cmake GOPATH=$(go env GOPATH) make - name: Set up Regal @@ -156,6 +137,5 @@ jobs: name: checkout repo content with: token: ${{ env.GH_ACCESS_TOKEN }} - token: ${{ env.GH_ACCESS_TOKEN }} - name: Remove pre-release folder - run: rm -r -f pre-release + run: rm -r -f pre-release \ No newline at end of file From 9d9c5bad3fbd32edd5e53b074cb09d3ec3ea921c Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Sun, 26 Nov 2023 10:18:42 +0200 Subject: [PATCH 042/195] updating yaml --- .github/workflows/pr-tests.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 6ec1d4b52..3c68901ea 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -13,6 +13,7 @@ env: REGO_ARTIFACT_KEY_NAME: rego_artifact REGO_ARTIFACT_PATH: releaseDev GH_ACCESS_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + GO_VERSION: 1.20 jobs: # main job of testing and building the env. @@ -38,7 +39,6 @@ jobs: REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} steps: - - uses: actions/checkout@v4 - uses: actions/checkout@v4 name: checkout repo content with: @@ -60,7 +60,7 @@ jobs: - name: Set up Regal uses: StyraInc/setup-regal@v0.1.0 with: - version: v0.10.1 + version: v0.13.0 - name: Lint Rego run: regal lint --format github rules @@ -132,7 +132,6 @@ jobs: runs-on: ubuntu-latest needs: [ks-and-rego-test] steps: - - uses: actions/checkout@v4 - uses: actions/checkout@v4 name: checkout repo content with: From 3518c8916b75d16647b50c12dfb297399127f1c4 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Sun, 26 Nov 2023 10:19:10 +0200 Subject: [PATCH 043/195] remove go --- .github/workflows/pr-tests.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 3c68901ea..d8b9c10d6 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -13,7 +13,6 @@ env: REGO_ARTIFACT_KEY_NAME: rego_artifact REGO_ARTIFACT_PATH: releaseDev GH_ACCESS_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - GO_VERSION: 1.20 jobs: # main job of testing and building the env. From 3e0301945c0c3fbfe58eff44a7a643418a1a460a Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Sun, 26 Nov 2023 14:04:27 +0200 Subject: [PATCH 044/195] Update pr-tests.yaml Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- .github/workflows/pr-tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index d8b9c10d6..1d5ec773c 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -21,7 +21,7 @@ jobs: pull-requests: write uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main with: - GO_VERSION: 1.20 + GO_VERSION: '1.20' BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... 
secrets: inherit @@ -136,4 +136,4 @@ jobs: with: token: ${{ env.GH_ACCESS_TOKEN }} - name: Remove pre-release folder - run: rm -r -f pre-release \ No newline at end of file + run: rm -r -f pre-release From 2effcf5cf0796142cc5b43d8efa56ae14e80ee40 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Sun, 26 Nov 2023 14:10:49 +0200 Subject: [PATCH 045/195] fix ver --- .github/workflows/pr-tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 1d5ec773c..239dab639 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -53,13 +53,13 @@ jobs: - name: Test Regoes working-directory: testrunner run: | - apt update && apt install -y cmake + apt update && apt install -y cmake GOPATH=$(go env GOPATH) make - name: Set up Regal uses: StyraInc/setup-regal@v0.1.0 with: - version: v0.13.0 + version: v0.10.1 - name: Lint Rego run: regal lint --format github rules From 09012e37566f10ea817651246c36c1b1df82ee7e Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Sun, 26 Nov 2023 14:24:19 +0200 Subject: [PATCH 046/195] update --- .github/workflows/pr-tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 239dab639..6aefb1c54 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -53,7 +53,7 @@ jobs: - name: Test Regoes working-directory: testrunner run: | - apt update && apt install -y cmake + sudo apt update && apt install -y cmake GOPATH=$(go env GOPATH) make - name: Set up Regal From 2c36351ce24241f61c01bcf0db2f3c0dc66c7c40 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Sun, 26 Nov 2023 14:30:32 +0200 Subject: [PATCH 047/195] modified: .github/workflows/pr-tests.yaml --- .github/workflows/pr-tests.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 6aefb1c54..667e55208 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -53,7 +53,12 @@ jobs: - name: Test Regoes working-directory: testrunner run: | - sudo apt update && apt install -y cmake + for i in {1..5}; do + sudo apt update && break || sleep 15; + done + for i in {1..5}; do + sudo apt install -y cmake && break || sleep 15; + done GOPATH=$(go env GOPATH) make - name: Set up Regal From 56cfa3f377e01906d9842fc9ed93f58f581d6887 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Sun, 26 Nov 2023 14:37:21 +0200 Subject: [PATCH 048/195] modified: .github/workflows/pr-tests.yaml --- .github/workflows/pr-tests.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 667e55208..760c831aa 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -47,7 +47,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.20 + go-version: '1.20' # testing rego library - name: Test Regoes @@ -59,6 +59,7 @@ jobs: for i in {1..5}; do sudo apt install -y cmake && break || sleep 15; done + echo "Using Go path: $(which go)" GOPATH=$(go env GOPATH) make - name: Set up Regal From 3b6827e303132f269a719ef487d845901a019985 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Sun, 26 Nov 2023 14:54:38 +0200 Subject: [PATCH 049/195] fix rules --- rules/rule-identify-old-k8s-registry/rule.metadata.json | 2 +- rules/rule-privileged-container/rule.metadata.json | 2 +- 2 
files changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/rule-identify-old-k8s-registry/rule.metadata.json b/rules/rule-identify-old-k8s-registry/rule.metadata.json index 569e22f9d..a2b096893 100644 --- a/rules/rule-identify-old-k8s-registry/rule.metadata.json +++ b/rules/rule-identify-old-k8s-registry/rule.metadata.json @@ -1,7 +1,7 @@ { "name": "rule-identify-old-k8s-registry", "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry" }, "ruleLanguage": "Rego", "match": [ diff --git a/rules/rule-privileged-container/rule.metadata.json b/rules/rule-privileged-container/rule.metadata.json index 02fabb95d..2de543739 100644 --- a/rules/rule-privileged-container/rule.metadata.json +++ b/rules/rule-privileged-container/rule.metadata.json @@ -3,7 +3,7 @@ "attributes": { "m$K8sThreatMatrix": "Privilege Escalation::privileged container", "mitre": "Privilege Escalation", - "mitreCode": "TA0004", + "mitreCode": "TA0004" }, "ruleLanguage": "Rego", "match": [ From 5610b5a9911af87417463a9c740a1917ecca2a00 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 26 Nov 2023 15:05:37 +0200 Subject: [PATCH 050/195] use fixpaths instead of reviewpaths Signed-off-by: YiscahLevySilas1 --- rules/set-fsgroup-value/raw.rego | 32 ++++--------------- .../test/cronjob/expected.json | 6 +--- .../set-fsgroup-value/test/pod/expected.json | 6 +--- .../test/workload/expected.json | 6 +--- 4 files changed, 10 insertions(+), 40 deletions(-) diff --git a/rules/set-fsgroup-value/raw.rego b/rules/set-fsgroup-value/raw.rego index 9d81b6076..c5deac6a9 100644 --- a/rules/set-fsgroup-value/raw.rego +++ b/rules/set-fsgroup-value/raw.rego @@ -16,16 +16,13 @@ deny[msga] { securityContextPath := "spec.securityContext" - paths := get_paths(pod, securityContextPath) + fixPaths = [{"path":sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] - msga := { "alertMessage": sprintf("Pod: %v does not set 'securityContext.fsGroup' with allowed value", [pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": paths["failedPaths"], - "failedPaths": paths["failedPaths"], - "fixPaths": paths["fixPaths"], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": [pod] } @@ -46,15 +43,13 @@ deny[msga] { securityContextPath := "spec.jobTemplate.spec.template.spec.securityContext" - paths := get_paths(cj, securityContextPath) + fixPaths = [{"path":sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("CronJob: %v does not set 'securityContext.fsGroup' with allowed value", [cj.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": paths["failedPaths"], - "failedPaths": paths["failedPaths"], - "fixPaths": paths["fixPaths"], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": [cj] } @@ -74,16 +69,14 @@ deny[msga] { # check securityContext has fsGroup set properly not fsGroupSetProperly(wl.spec.template.spec.securityContext) - path := "spec.template.spec.securityContext" - paths := get_paths(wl, path) + securityContextPath := "spec.template.spec.securityContext" + fixPaths = [{"path":sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Workload: %v does not set 'securityContext.fsGroup' with allowed value", [wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": paths["failedPaths"], - "failedPaths": paths["failedPaths"], - 
"fixPaths": paths["fixPaths"], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": [wl] } @@ -94,14 +87,3 @@ deny[msga] { fsGroupSetProperly(securityContext) := true if { securityContext.fsGroup >= 0 } else := false - - -get_paths(resources, securityContextPath) := result { - - objectPath := array.concat(split(securityContextPath, "."), ["fsGroup"]) - object.get(resources, objectPath, false) - - result = {"failedPaths": [], "fixPaths": [{"path":sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}]} -} else = result { - result = {"failedPaths": [securityContextPath], "fixPaths": []} -} diff --git a/rules/set-fsgroup-value/test/cronjob/expected.json b/rules/set-fsgroup-value/test/cronjob/expected.json index b6e50f572..f46d0c7af 100644 --- a/rules/set-fsgroup-value/test/cronjob/expected.json +++ b/rules/set-fsgroup-value/test/cronjob/expected.json @@ -3,9 +3,7 @@ "alertMessage": "CronJob: hello1 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": ["spec.jobTemplate.spec.template.spec.securityContext"], - "failedPaths": ["spec.jobTemplate.spec.template.spec.securityContext"], - "fixPaths": [], + "fixPaths": [{"path":"spec.jobTemplate.spec.template.spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", "alertObject": { "k8sApiObjects": [ @@ -23,8 +21,6 @@ "alertMessage": "CronJob: hello2 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": [], - "failedPaths": [], "fixPaths": [{"path":"spec.jobTemplate.spec.template.spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", "alertObject": { diff --git a/rules/set-fsgroup-value/test/pod/expected.json b/rules/set-fsgroup-value/test/pod/expected.json index 2c2989d96..853e1d5fd 100644 --- a/rules/set-fsgroup-value/test/pod/expected.json +++ b/rules/set-fsgroup-value/test/pod/expected.json @@ -3,9 +3,7 @@ "alertMessage": "Pod: nginx1 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": ["spec.securityContext"], - "failedPaths": ["spec.securityContext"], - "fixPaths": [], + "fixPaths": [{"path":"spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", "alertObject": { "k8sApiObjects": [ @@ -23,8 +21,6 @@ "alertMessage": "Pod: nginx2 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": [], - "failedPaths": [], "fixPaths": [{"path":"spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", "alertObject": { diff --git a/rules/set-fsgroup-value/test/workload/expected.json b/rules/set-fsgroup-value/test/workload/expected.json index f20bafa55..45c1fda6f 100644 --- a/rules/set-fsgroup-value/test/workload/expected.json +++ b/rules/set-fsgroup-value/test/workload/expected.json @@ -3,9 +3,7 @@ "alertMessage": "Workload: my-deployment1 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": ["spec.template.spec.securityContext"], - "failedPaths": ["spec.template.spec.securityContext"], - "fixPaths": [], + "fixPaths": [{"path":"spec.template.spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", "alertObject": { "k8sApiObjects": [ @@ -26,8 +24,6 @@ "alertMessage": "Workload: my-deployment2 does not set 'securityContext.fsGroup' with allowed value", "packagename": "armo_builtins", "alertScore": 7, - 
"reviewPaths": [], - "failedPaths": [], "fixPaths": [{"path":"spec.template.spec.securityContext.fsGroup", "value": "YOUR_VALUE"}], "ruleStatus": "", "alertObject": { From 79715934f0de973bb0e0d4258054c42dbee5f161 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 26 Nov 2023 15:07:40 +0200 Subject: [PATCH 051/195] fix file format Signed-off-by: YiscahLevySilas1 --- rules/set-fsgroup-value/raw.rego | 77 ++++++++++++++------------------ 1 file changed, 34 insertions(+), 43 deletions(-) diff --git a/rules/set-fsgroup-value/raw.rego b/rules/set-fsgroup-value/raw.rego index c5deac6a9..ec90d5f3d 100644 --- a/rules/set-fsgroup-value/raw.rego +++ b/rules/set-fsgroup-value/raw.rego @@ -6,84 +6,75 @@ import future.keywords.if # Fails if securityContext.fsGroup does not have a values >= 0 deny[msga] { - # verify the object kind - pod := input[_] - pod.kind = "Pod" + # verify the object kind + pod := input[_] + pod.kind = "Pod" - # check securityContext has fsGroup set properly - not fsGroupSetProperly(pod.spec.securityContext) + # check securityContext has fsGroup set properly + not fsGroupSetProperly(pod.spec.securityContext) + securityContextPath := "spec.securityContext" - securityContextPath := "spec.securityContext" + fixPaths = [{"path": sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] - fixPaths = [{"path":sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] - - msga := { + msga := { "alertMessage": sprintf("Pod: %v does not set 'securityContext.fsGroup' with allowed value", [pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, "fixPaths": fixPaths, - "alertObject": { - "k8sApiObjects": [pod] - } - } + "alertObject": {"k8sApiObjects": [pod]}, + } } - ### CRONJOB ### # Fails if securityContext.fsGroup does not have a values >= 0 deny[msga] { - # verify the object kind - cj := input[_] - cj.kind == "CronJob" + # verify the object kind + cj := input[_] + cj.kind == "CronJob" + + # check securityContext has fsGroup set properly + not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext) - # check securityContext has fsGroup set properly - not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext) + securityContextPath := "spec.jobTemplate.spec.template.spec.securityContext" - securityContextPath := "spec.jobTemplate.spec.template.spec.securityContext" + fixPaths = [{"path": sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] - fixPaths = [{"path":sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] - - msga := { + msga := { "alertMessage": sprintf("CronJob: %v does not set 'securityContext.fsGroup' with allowed value", [cj.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, "fixPaths": fixPaths, - "alertObject": { - "k8sApiObjects": [cj] - } - } + "alertObject": {"k8sApiObjects": [cj]}, + } } - ### WORKLOAD ### # Fails if securityContext.fsGroup does not have a values >= 0 deny[msga] { - # verify the object kind - wl := input[_] - manifest_kind := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - manifest_kind[wl.kind] + # verify the object kind + wl := input[_] + manifest_kind := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} + manifest_kind[wl.kind] - # check securityContext has fsGroup set properly - not fsGroupSetProperly(wl.spec.template.spec.securityContext) + # check securityContext has fsGroup set properly + not fsGroupSetProperly(wl.spec.template.spec.securityContext) - securityContextPath := 
"spec.template.spec.securityContext" - fixPaths = [{"path":sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] + securityContextPath := "spec.template.spec.securityContext" + fixPaths = [{"path": sprintf("%v.fsGroup", [securityContextPath]), "value": "YOUR_VALUE"}] - msga := { + msga := { "alertMessage": sprintf("Workload: %v does not set 'securityContext.fsGroup' with allowed value", [wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, "fixPaths": fixPaths, - "alertObject": { - "k8sApiObjects": [wl] - } - } + "alertObject": {"k8sApiObjects": [wl]}, + } } # fsGroupSetProperly checks if fsGroup has a value >= 0. -fsGroupSetProperly(securityContext) := true if { - securityContext.fsGroup >= 0 +fsGroupSetProperly(securityContext) if { + securityContext.fsGroup >= 0 } else := false From f8b9a77dc1c98f2c89e48bfee2429cbd0c5420cc Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 26 Nov 2023 15:54:48 +0200 Subject: [PATCH 052/195] use fixpaths instead of reviewpaths Signed-off-by: YiscahLevySilas1 --- rules/set-procmount-default/raw.rego | 120 +++++++++--------- .../test/cronjob/expected.json | 39 +++--- .../test/pod/expected.json | 39 +++--- .../test/workload/expected.json | 45 ++++--- .../test/workload/input/deployment.yaml | 1 - 5 files changed, 123 insertions(+), 121 deletions(-) diff --git a/rules/set-procmount-default/raw.rego b/rules/set-procmount-default/raw.rego index 71b43255e..50d170851 100644 --- a/rules/set-procmount-default/raw.rego +++ b/rules/set-procmount-default/raw.rego @@ -1,102 +1,96 @@ package armo_builtins +import future.keywords.if + # Fails if container does not define the "procMount" parameter as "Default" deny[msga] { - # checks at first if we the procMountType feature gate is enabled on the api-server - obj := input[_] - is_control_plane_info(obj) - is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine) + # checks at first if we the procMountType feature gate is enabled on the api-server + obj := input[_] + is_control_plane_info(obj) + is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine) - # checks if procMount paramenter has the right value in containers - pod := input[_] - pod.kind = "Pod" + # checks if procMount paramenter has the right value in containers + pod := input[_] + pod.kind = "Pod" # retrieve container list - container := pod.spec.containers[i] - container.securityContext.procMount != "Default" + container := pod.spec.containers[i] + not procMountSetProperly(container.securityContext) - path := sprintf("containers[%d].securityContext.procMount", [i]) - msga := { + fixPaths = [{"path": sprintf("containers[%d].securityContext.procMount", [i]), "value": "Default"}] + msga := { "alertMessage": sprintf("Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'", [pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": [path], - "failedPaths": [path], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [pod] - } - } + "fixPaths": fixPaths, + "alertObject": {"k8sApiObjects": [pod]}, + } } deny[msga] { - # checks at first if we the procMountType feature gate is enabled on the api-server - obj := input[_] - is_control_plane_info(obj) - is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine) - - # checks if we are managing the right workload kind - wl := input[_] - manifest_kind := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - manifest_kind[wl.kind] - - # retrieve container list - container := wl.spec.template.spec.containers[i] - 
container.securityContext.procMount != "Default" - - path := sprintf("containers[%d].securityContext.procMount", [i]) - msga := { + # checks at first if we the procMountType feature gate is enabled on the api-server + obj := input[_] + is_control_plane_info(obj) + is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine) + + # checks if we are managing the right workload kind + wl := input[_] + manifest_kind := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} + manifest_kind[wl.kind] + + # retrieve container list + container := wl.spec.template.spec.containers[i] + not procMountSetProperly(container.securityContext) + + fixPaths = [{"path": sprintf("wl.spec.template.spec.containers[%d].securityContext.procMount", [i]), "value": "Default"}] + msga := { "alertMessage": sprintf("Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'", [wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": [path], - "failedPaths": [path], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [wl] - } - } + "fixPaths": fixPaths, + "alertObject": {"k8sApiObjects": [wl]}, + } } deny[msga] { - # checks at first if we the procMountType feature gate is enabled on the api-server - obj := input[_] - is_control_plane_info(obj) - is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine) + # checks at first if we the procMountType feature gate is enabled on the api-server + obj := input[_] + is_control_plane_info(obj) + is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine) - # checks if we are managing the right workload kind - cj := input[_] - cj.kind = "CronJob" + # checks if we are managing the right workload kind + cj := input[_] + cj.kind = "CronJob" - # retrieve container list - container := cj.spec.jobTemplate.spec.template.spec.containers[i] - container.securityContext.procMount != "Default" + # retrieve container list + container := cj.spec.jobTemplate.spec.template.spec.containers[i] + not procMountSetProperly(container.securityContext) - path := sprintf("containers[%d].securityContext.procMount", [i]) - msga := { + fixPaths = [{"path": sprintf("cj.spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount", [i]), "value": "Default"}] + msga := { "alertMessage": sprintf("CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'", [cj.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": [path], - "failedPaths": [path], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [cj] - } - } + "fixPaths": fixPaths, + "alertObject": {"k8sApiObjects": [cj]}, + } } - # check if we are managing ControlPlaneInfo -is_control_plane_info(obj) { +is_control_plane_info(obj) if { obj.apiVersion == "hostdata.kubescape.cloud/v1beta0" obj.kind == "ControlPlaneInfo" } # check if ProcMountType feature-gate is enabled -is_proc_mount_type_enabled(command) { +is_proc_mount_type_enabled(command) if { contains(command, "--feature-gates=") args := regex.split(` +`, command) some i regex.match(`ProcMountType=true`, args[i]) } + +# procMountSetProperly checks if procMount has value of "Default". 
+procMountSetProperly(securityContext) if { + securityContext.procMount == "Default" +} else := false diff --git a/rules/set-procmount-default/test/cronjob/expected.json b/rules/set-procmount-default/test/cronjob/expected.json index 26c2a3001..6ca670c4b 100644 --- a/rules/set-procmount-default/test/cronjob/expected.json +++ b/rules/set-procmount-default/test/cronjob/expected.json @@ -1,22 +1,25 @@ [ - { - "alertMessage": "CronJob: hello has containers that do not set 'securityContext.procMount' to 'Default'", - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": ["containers[0].securityContext.procMount"], - "failedPaths": ["containers[0].securityContext.procMount"], - "fixPaths": [], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "hello" - } - } - ] + { + "alertMessage": "CronJob: hello has containers that do not set 'securityContext.procMount' to 'Default'", + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": [ + { + "path": "cj.spec.jobTemplate.spec.template.spec.containers[0].securityContext.procMount", + "value": "Default" + } + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } } + ] } + } ] \ No newline at end of file diff --git a/rules/set-procmount-default/test/pod/expected.json b/rules/set-procmount-default/test/pod/expected.json index 4c75f435c..094a16d29 100644 --- a/rules/set-procmount-default/test/pod/expected.json +++ b/rules/set-procmount-default/test/pod/expected.json @@ -1,22 +1,25 @@ [ - { - "alertMessage": "Pod: nginx has containers that do not set 'securityContext.procMount' to 'Default'", - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": ["containers[1].securityContext.procMount"], - "failedPaths": ["containers[1].securityContext.procMount"], - "fixPaths": [], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "nginx" - } - } - ] + { + "alertMessage": "Pod: nginx has containers that do not set 'securityContext.procMount' to 'Default'", + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": [ + { + "path": "containers[1].securityContext.procMount", + "value": "Default" + } + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "nginx" + } } + ] } + } ] \ No newline at end of file diff --git a/rules/set-procmount-default/test/workload/expected.json b/rules/set-procmount-default/test/workload/expected.json index bb95bf961..38c63f334 100644 --- a/rules/set-procmount-default/test/workload/expected.json +++ b/rules/set-procmount-default/test/workload/expected.json @@ -1,25 +1,28 @@ [ - { - "alertMessage": "Workload: my-deployment has containers that do not set 'securityContext.procMount' to 'Default'", - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": ["containers[1].securityContext.procMount"], - "failedPaths": ["containers[1].securityContext.procMount"], - "fixPaths": [], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "name": "my-deployment", - "labels": { - "app": "goproxy" - } - } - } - ] + { + "alertMessage": "Workload: my-deployment has containers that do not set 'securityContext.procMount' to 'Default'", + "packagename": "armo_builtins", + 
"alertScore": 7, + "fixPaths": [ + { + "path": "wl.spec.template.spec.containers[1].securityContext.procMount", + "value": "Default" + } + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "my-deployment", + "labels": { + "app": "goproxy" + } + } } + ] } + } ] \ No newline at end of file diff --git a/rules/set-procmount-default/test/workload/input/deployment.yaml b/rules/set-procmount-default/test/workload/input/deployment.yaml index 7604b3acb..a3dc26a23 100644 --- a/rules/set-procmount-default/test/workload/input/deployment.yaml +++ b/rules/set-procmount-default/test/workload/input/deployment.yaml @@ -23,7 +23,6 @@ spec: - name : php image : php:7.0-apache securityContext: - procMount: Unmasked volumeMounts : - name : site-data mountPath : /var/www/html From 989a653359f27af18642ac4fc3a3f01d241bc40b Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 26 Nov 2023 16:05:49 +0200 Subject: [PATCH 053/195] fix paths Signed-off-by: YiscahLevySilas1 --- rules/set-procmount-default/raw.rego | 6 +++--- rules/set-procmount-default/test/cronjob/expected.json | 2 +- rules/set-procmount-default/test/pod/expected.json | 2 +- rules/set-procmount-default/test/workload/expected.json | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/rules/set-procmount-default/raw.rego b/rules/set-procmount-default/raw.rego index 50d170851..d605ae745 100644 --- a/rules/set-procmount-default/raw.rego +++ b/rules/set-procmount-default/raw.rego @@ -17,7 +17,7 @@ deny[msga] { container := pod.spec.containers[i] not procMountSetProperly(container.securityContext) - fixPaths = [{"path": sprintf("containers[%d].securityContext.procMount", [i]), "value": "Default"}] + fixPaths = [{"path": sprintf("spec.containers[%d].securityContext.procMount", [i]), "value": "Default"}] msga := { "alertMessage": sprintf("Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'", [pod.metadata.name]), "packagename": "armo_builtins", @@ -42,7 +42,7 @@ deny[msga] { container := wl.spec.template.spec.containers[i] not procMountSetProperly(container.securityContext) - fixPaths = [{"path": sprintf("wl.spec.template.spec.containers[%d].securityContext.procMount", [i]), "value": "Default"}] + fixPaths = [{"path": sprintf("spec.template.spec.containers[%d].securityContext.procMount", [i]), "value": "Default"}] msga := { "alertMessage": sprintf("Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'", [wl.metadata.name]), "packagename": "armo_builtins", @@ -66,7 +66,7 @@ deny[msga] { container := cj.spec.jobTemplate.spec.template.spec.containers[i] not procMountSetProperly(container.securityContext) - fixPaths = [{"path": sprintf("cj.spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount", [i]), "value": "Default"}] + fixPaths = [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount", [i]), "value": "Default"}] msga := { "alertMessage": sprintf("CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'", [cj.metadata.name]), "packagename": "armo_builtins", diff --git a/rules/set-procmount-default/test/cronjob/expected.json b/rules/set-procmount-default/test/cronjob/expected.json index 6ca670c4b..130dead1f 100644 --- a/rules/set-procmount-default/test/cronjob/expected.json +++ b/rules/set-procmount-default/test/cronjob/expected.json @@ -5,7 +5,7 @@ "alertScore": 7, "fixPaths": [ { - "path": 
"cj.spec.jobTemplate.spec.template.spec.containers[0].securityContext.procMount", + "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.procMount", "value": "Default" } ], diff --git a/rules/set-procmount-default/test/pod/expected.json b/rules/set-procmount-default/test/pod/expected.json index 094a16d29..bd4f615e5 100644 --- a/rules/set-procmount-default/test/pod/expected.json +++ b/rules/set-procmount-default/test/pod/expected.json @@ -5,7 +5,7 @@ "alertScore": 7, "fixPaths": [ { - "path": "containers[1].securityContext.procMount", + "path": "spec.containers[1].securityContext.procMount", "value": "Default" } ], diff --git a/rules/set-procmount-default/test/workload/expected.json b/rules/set-procmount-default/test/workload/expected.json index 38c63f334..1a27b5233 100644 --- a/rules/set-procmount-default/test/workload/expected.json +++ b/rules/set-procmount-default/test/workload/expected.json @@ -5,7 +5,7 @@ "alertScore": 7, "fixPaths": [ { - "path": "wl.spec.template.spec.containers[1].securityContext.procMount", + "path": "spec.template.spec.containers[1].securityContext.procMount", "value": "Default" } ], From a423daca18904b32b01af3ca891f8e94edf02d2a Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 26 Nov 2023 16:22:22 +0200 Subject: [PATCH 054/195] use fixpaths instead of reviewpaths Signed-off-by: YiscahLevySilas1 --- rules/set-supplementalgroups-values/raw.rego | 58 ++++++++----------- .../test/cronjob/expected.json | 39 +++++++------ .../test/pod/expected.json | 39 +++++++------ .../test/workload/expected.json | 45 +++++++------- 4 files changed, 89 insertions(+), 92 deletions(-) diff --git a/rules/set-supplementalgroups-values/raw.rego b/rules/set-supplementalgroups-values/raw.rego index caca884db..0b10e491c 100644 --- a/rules/set-supplementalgroups-values/raw.rego +++ b/rules/set-supplementalgroups-values/raw.rego @@ -4,74 +4,62 @@ package armo_builtins # Fails if securityContext.supplementalGroups is not set deny[msga] { - # verify the object kind + # verify the object kind pod := input[_] pod.kind = "Pod" # check securityContext has supplementalGroups set - not pod.spec.securityContext.supplementalGroups + not pod.spec.securityContext.supplementalGroups + fixPaths = [{"path": "spec.securityContext.supplementalGroups", "value": "YOUR_VALUE"}] - path := "spec.securityContext" - msga := { + msga := { "alertMessage": sprintf("Pod: %v does not set 'securityContext.supplementalGroups'", [pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": [path], - "failedPaths": [path], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [pod] - } - } + "fixPaths": fixPaths, + "alertObject": {"k8sApiObjects": [pod]}, + } } ### WORKLOAD ### # Fails if securityContext.supplementalGroups is not set deny[msga] { - # verify the object kind + # verify the object kind wl := input[_] - manifest_kind := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + manifest_kind := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} manifest_kind[wl.kind] # check securityContext has supplementalGroups set - not wl.spec.template.spec.securityContext.supplementalGroups + not wl.spec.template.spec.securityContext.supplementalGroups + fixPaths = [{"path": "spec.template.spec.securityContext.supplementalGroups", "value": "YOUR_VALUE"}] - path := "spec.template.spec.securityContext" - msga := { + msga := { "alertMessage": sprintf("Workload: %v does not set 'securityContext.supplementalGroups'", [wl.metadata.name]), "packagename": 
"armo_builtins", "alertScore": 7, - "reviewPaths": [path], - "failedPaths": [path], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [wl] - } - } + "fixPaths": fixPaths, + "alertObject": {"k8sApiObjects": [wl]}, + } } ### CRONJOB ### # Fails if securityContext.supplementalGroups is not set deny[msga] { - # verify the object kind + # verify the object kind cj := input[_] - cj.kind == "CronJob" + cj.kind == "CronJob" # check securityContext has supplementalGroups set - not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups + not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups + fixPaths = [{"path": "spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups", "value": "YOUR_VALUE"}] - path := "spec.jobTemplate.spec.template.spec.securityContext" - msga := { + msga := { "alertMessage": sprintf("CronJob: %v does not set 'securityContext.supplementalGroups'", [cj.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": [path], - "failedPaths": [path], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [cj] - } - } + "fixPaths": fixPaths, + "alertObject": {"k8sApiObjects": [cj]}, + } } diff --git a/rules/set-supplementalgroups-values/test/cronjob/expected.json b/rules/set-supplementalgroups-values/test/cronjob/expected.json index c828ba8cf..0358b32b3 100644 --- a/rules/set-supplementalgroups-values/test/cronjob/expected.json +++ b/rules/set-supplementalgroups-values/test/cronjob/expected.json @@ -1,22 +1,25 @@ [ - { - "alertMessage": "CronJob: hello does not set 'securityContext.supplementalGroups'", - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": ["spec.jobTemplate.spec.template.spec.securityContext"], - "failedPaths": ["spec.jobTemplate.spec.template.spec.securityContext"], - "fixPaths": [], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "hello" - } - } - ] + { + "alertMessage": "CronJob: hello does not set 'securityContext.supplementalGroups'", + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } } + ] } + } ] \ No newline at end of file diff --git a/rules/set-supplementalgroups-values/test/pod/expected.json b/rules/set-supplementalgroups-values/test/pod/expected.json index f6f7dcf0a..6da488301 100644 --- a/rules/set-supplementalgroups-values/test/pod/expected.json +++ b/rules/set-supplementalgroups-values/test/pod/expected.json @@ -1,22 +1,25 @@ [ - { - "alertMessage": "Pod: nginx does not set 'securityContext.supplementalGroups'", - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": ["spec.securityContext"], - "failedPaths": ["spec.securityContext"], - "fixPaths": [], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "nginx" - } - } - ] + { + "alertMessage": "Pod: nginx does not set 'securityContext.supplementalGroups'", + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": [ + { + "path": "spec.securityContext.supplementalGroups", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + 
"name": "nginx" + } } + ] } + } ] \ No newline at end of file diff --git a/rules/set-supplementalgroups-values/test/workload/expected.json b/rules/set-supplementalgroups-values/test/workload/expected.json index e093db121..1c9a75f85 100644 --- a/rules/set-supplementalgroups-values/test/workload/expected.json +++ b/rules/set-supplementalgroups-values/test/workload/expected.json @@ -1,25 +1,28 @@ [ - { - "alertMessage": "Workload: my-deployment does not set 'securityContext.supplementalGroups'", - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": ["spec.template.spec.securityContext"], - "failedPaths": ["spec.template.spec.securityContext"], - "fixPaths": [], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "name": "my-deployment", - "labels": { - "app": "goproxy" - } - } - } - ] + { + "alertMessage": "Workload: my-deployment does not set 'securityContext.supplementalGroups'", + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": [ + { + "path": "spec.template.spec.securityContext.supplementalGroups", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "my-deployment", + "labels": { + "app": "goproxy" + } + } } + ] } + } ] \ No newline at end of file From 716267ac70a68eb1376317043c1ab1603a5268b5 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 27 Nov 2023 12:30:23 +0200 Subject: [PATCH 055/195] use fixpaths instead of reviewpaths when possible Signed-off-by: YiscahLevySilas1 --- rules/non-root-containers/raw.rego | 13 ++--- .../test/cronjob/expected.json | 9 ++-- .../test/pod/expected.json | 54 +++++++++++-------- .../test/pod/input/pod.yaml | 2 +- 4 files changed, 44 insertions(+), 34 deletions(-) diff --git a/rules/non-root-containers/raw.rego b/rules/non-root-containers/raw.rego index 08c6e74ae..1b993a9bb 100644 --- a/rules/non-root-containers/raw.rego +++ b/rules/non-root-containers/raw.rego @@ -107,14 +107,13 @@ evaluate_workload_non_root_container(container, pod, start_of_path) = alertInfo ################################################################################# # Value resolution functions +# TODO - refactor functions, can be simplified get_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]) - runAsNonRoot := {"value" : container.securityContext.runAsNonRoot, "failed_path" : failed_path, "fixPath": [] ,"defined" : true} + runAsNonRoot := {"value" : container.securityContext.runAsNonRoot, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}] ,"defined" : true} } else = runAsNonRoot { - failed_path := sprintf("%v.securityContext.runAsNonRoot", [start_of_path]) - runAsNonRoot := {"value" : pod.spec.securityContext.runAsNonRoot, "failed_path" : failed_path, "fixPath": [], "defined" : true} + runAsNonRoot := {"value" : pod.spec.securityContext.runAsNonRoot, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}], "defined" : true} } else = {"value" : false, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}], "defined" : false} { 
is_allow_privilege_escalation_field(container, pod) } else = {"value" : false, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]) , "value":"true"}, {"path":sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : false} @@ -145,11 +144,9 @@ get_run_as_group_value(container, pod, start_of_path) = runAsGroup { } get_allow_privilege_escalation(container, pod, start_of_path) = allowPrivilegeEscalation { - failed_path := sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]) - allowPrivilegeEscalation := {"value" : container.securityContext.allowPrivilegeEscalation, "failed_path" : failed_path, "fixPath": [],"defined" : true} + allowPrivilegeEscalation := {"value" : container.securityContext.allowPrivilegeEscalation, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : true} } else = allowPrivilegeEscalation { - failed_path := sprintf("%v.securityContext.allowPrivilegeEscalation", [start_of_path]) - allowPrivilegeEscalation := {"value" : pod.spec.securityContext.allowPrivilegeEscalation, "failed_path" : failed_path, "fixPath": [],"defined" : true} + allowPrivilegeEscalation := {"value" : pod.spec.securityContext.allowPrivilegeEscalation, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : true} } else = {"value" : true, "failed_path": "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : false} choose_first_if_defined(l1, l2) = c { diff --git a/rules/non-root-containers/test/cronjob/expected.json b/rules/non-root-containers/test/cronjob/expected.json index 3a60e688a..979c609ea 100644 --- a/rules/non-root-containers/test/cronjob/expected.json +++ b/rules/non-root-containers/test/cronjob/expected.json @@ -23,9 +23,12 @@ } }, { "alertMessage": "container :hello2 in CronJob: hello may run as root", - "reviewPaths": ["spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsNonRoot"], - "failedPaths": ["spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsNonRoot"], - "fixPaths": [], + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [{ + "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsNonRoot", + "value": "true" + }], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, diff --git a/rules/non-root-containers/test/pod/expected.json b/rules/non-root-containers/test/pod/expected.json index 3b8482116..5687695ac 100644 --- a/rules/non-root-containers/test/pod/expected.json +++ b/rules/non-root-containers/test/pod/expected.json @@ -1,24 +1,34 @@ -[{ - "alertMessage": "container: web in pod: static-web may run as root", - "reviewPaths": [], - "failedPaths": [], - "fixPaths": [{ - "path": "spec.containers[0].securityContext.runAsNonRoot", - "value": "true" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "labels": { - "role": "myrole" - }, - "name": "static-web" +[ + { + "alertMessage": "container: web in pod: static-web may run as root", + "reviewPaths": [], + "failedPaths": 
[], + "fixPaths": [ + { + "path": "spec.containers[0].securityContext.runAsNonRoot", + "value": "true" + }, + { + "path": "spec.containers[0].securityContext.allowPrivilegeEscalation", + "value": "false" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "role": "myrole" + }, + "name": "static-web" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file diff --git a/rules/non-root-containers/test/pod/input/pod.yaml b/rules/non-root-containers/test/pod/input/pod.yaml index 8dc43eecc..cf9f1aac4 100644 --- a/rules/non-root-containers/test/pod/input/pod.yaml +++ b/rules/non-root-containers/test/pod/input/pod.yaml @@ -6,7 +6,7 @@ metadata: role: myrole spec: securityContext: - allowPrivilegeEscalation: false + allowPrivilegeEscalation: true containers: - name: web image: nginx From 0525a4564685a588e6c1f059c75c87afbca286a3 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 27 Nov 2023 12:59:36 +0200 Subject: [PATCH 056/195] clean code Signed-off-by: YiscahLevySilas1 --- rules/immutable-container-filesystem/raw.rego | 49 ++++++------------- 1 file changed, 16 insertions(+), 33 deletions(-) diff --git a/rules/immutable-container-filesystem/raw.rego b/rules/immutable-container-filesystem/raw.rego index 2ea41df27..f3438c6d3 100644 --- a/rules/immutable-container-filesystem/raw.rego +++ b/rules/immutable-container-filesystem/raw.rego @@ -7,15 +7,14 @@ deny[msga] { pod.kind == "Pod" container := pod.spec.containers[i] start_of_path := "spec." - result := is_mutable_filesystem(container, start_of_path, i) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + is_mutable_filesystem(container) + fixPath = {"path": sprintf("%vcontainers[%d].securityContext.readOnlyRootFilesystem", [start_of_path, i]), "value": "true"} msga := { "alertMessage": sprintf("container: %v in pod: %v has mutable filesystem", [container.name, pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "failedPaths": failed_path, - "fixPaths": fixed_path, + "failedPaths": [], + "fixPaths": [fixPath], "alertObject": { "k8sApiObjects": [pod] } @@ -29,15 +28,14 @@ deny[msga] { spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] start_of_path := "spec.template.spec." - result := is_mutable_filesystem(container, start_of_path, i) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + is_mutable_filesystem(container) + fixPath = {"path": sprintf("%vcontainers[%d].securityContext.readOnlyRootFilesystem", [start_of_path, i]), "value": "true"} msga := { "alertMessage": sprintf("container :%v in %v: %v has mutable filesystem", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "failedPaths": failed_path, - "fixPaths": fixed_path, + "failedPaths": [], + "fixPaths": [fixPath], "alertObject": { "k8sApiObjects": [wl] } @@ -51,16 +49,15 @@ deny[msga] { wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] start_of_path := "spec.jobTemplate.spec.template.spec." 
- result := is_mutable_filesystem(container, start_of_path, i) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + is_mutable_filesystem(container) + fixPath = {"path": sprintf("%vcontainers[%d].securityContext.readOnlyRootFilesystem", [start_of_path, i]), "value": "true"} msga := { "alertMessage": sprintf("container :%v in %v: %v has mutable filesystem", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "failedPaths": failed_path, - "fixPaths": fixed_path, + "failedPaths": [], + "fixPaths": [fixPath], "alertObject": { "k8sApiObjects": [wl] } @@ -68,25 +65,11 @@ deny[msga] { } # Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec -is_mutable_filesystem(container, start_of_path, i) = [failed_path, fixPath] { +is_mutable_filesystem(container) { container.securityContext.readOnlyRootFilesystem == false - fixPath = {"path": sprintf("%vcontainers[%v].securityContext.readOnlyRootFilesystem", [start_of_path, format_int(i, 10)]), "value": "true"} - failed_path = "" - } +} - is_mutable_filesystem(container, start_of_path, i) = [failed_path, fixPath] { +is_mutable_filesystem(container) { not container.securityContext.readOnlyRootFilesystem == false not container.securityContext.readOnlyRootFilesystem == true - fixPath = {"path": sprintf("%vcontainers[%v].securityContext.readOnlyRootFilesystem", [start_of_path, format_int(i, 10)]), "value": "true"} - failed_path = "" - } - - - get_failed_path(paths) = [paths[0]] { - paths[0] != "" -} else = [] - - -get_fixed_path(paths) = [paths[1]] { - paths[1] != "" -} else = [] +} From b3ac8f2c7a5919976364444e5c3af9d7ba7daad8 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:09:10 +0200 Subject: [PATCH 057/195] Update __YAMLscan.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/__YAMLscan.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/__YAMLscan.json b/frameworks/__YAMLscan.json index 8508dcebc..d7da504d7 100644 --- a/frameworks/__YAMLscan.json +++ b/frameworks/__YAMLscan.json @@ -2,6 +2,7 @@ "name": "YAML-scanning", "description": "Controls relevant to yamls", "attributes": { + "armoBuiltin": true }, "scanningScope": { "matches": [ @@ -59,4 +60,4 @@ "CVE-2022-24348-argocddirtraversal", "CVE-2022-0492-cgroups-container-escape" ] -} \ No newline at end of file +} From 51193763868ee81f45b7f9f68490ac592eaf1dd1 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:09:20 +0200 Subject: [PATCH 058/195] Update allcontrols.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/allcontrols.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index f4d738ade..f27692d1b 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -2,6 +2,7 @@ "name": "AllControls", "description": "Contains all the controls from all the frameworks", "attributes": { + "armoBuiltin": true }, "scanningScope": { "matches": [ @@ -384,4 +385,4 @@ } } ] -} \ No newline at end of file +} From 79ab4ee08b53ff7ff9d2e96283b0bd2305a40a96 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:09:55 +0200 Subject: [PATCH 059/195] Update armobest.json Signed-off-by: Yuval Leibovich 
<89763818+yuleib@users.noreply.github.com> --- frameworks/armobest.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/armobest.json b/frameworks/armobest.json index 7f1b62931..811f37f32 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -2,6 +2,7 @@ "name": "ArmoBest", "description": "", "attributes": { + "armoBuiltin": true }, "scanningScope": { "matches": [ @@ -246,4 +247,4 @@ } } ] -} \ No newline at end of file +} From 20eb8c89242660488a198601b5e3cfeb5f84bacf Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:10:08 +0200 Subject: [PATCH 060/195] Update cis-aks-t1.2.0.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/cis-aks-t1.2.0.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/cis-aks-t1.2.0.json b/frameworks/cis-aks-t1.2.0.json index 65d785cce..21b6e6583 100644 --- a/frameworks/cis-aks-t1.2.0.json +++ b/frameworks/cis-aks-t1.2.0.json @@ -2,6 +2,7 @@ "name": "cis-aks-t1.2.0", "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", "attributes": { + "armoBuiltin": true, "version": "v1.2.0" }, "scanningScope": { @@ -687,4 +688,4 @@ } } } -} \ No newline at end of file +} From ed9b8af343b787d1202d5916fa0fa4d053e3b8e2 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:10:25 +0200 Subject: [PATCH 061/195] Update cis-eks-t1.2.0.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/cis-eks-t1.2.0.json | 1 + 1 file changed, 1 insertion(+) diff --git a/frameworks/cis-eks-t1.2.0.json b/frameworks/cis-eks-t1.2.0.json index 7619b9f80..1fb2123ea 100644 --- a/frameworks/cis-eks-t1.2.0.json +++ b/frameworks/cis-eks-t1.2.0.json @@ -2,6 +2,7 @@ "name": "cis-eks-t1.2.0", "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", "attributes": { + "armoBuiltin": true, "version": "v1.2.0" }, "scanningScope": { From f9aba881da64f0277a96361e5fda7f6ed56f7388 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:10:40 +0200 Subject: [PATCH 062/195] Update cis-v1.23-t1.0.1.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/cis-v1.23-t1.0.1.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/cis-v1.23-t1.0.1.json b/frameworks/cis-v1.23-t1.0.1.json index 4cc6d0829..b82c9ca6f 100644 --- a/frameworks/cis-v1.23-t1.0.1.json +++ b/frameworks/cis-v1.23-t1.0.1.json @@ -2,6 +2,7 @@ "name": "cis-v1.23-t1.0.1", "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", "attributes": { + "armoBuiltin": true, "version": "v1.0.1" }, "scanningScope": { @@ -1329,4 +1330,4 @@ } } ] -} \ No newline at end of file +} From ad959632343dccc40aeca42958bbde6c1abc35a7 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:10:52 +0200 Subject: [PATCH 063/195] Update clusterscan.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/clusterscan.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/clusterscan.json 
b/frameworks/clusterscan.json index 713fddf82..2b805b803 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -2,6 +2,7 @@ "name": "ClusterScan", "description": "Framework for scanning a cluster", "attributes": { + "armoBuiltin": true }, "typeTags": [ "security" @@ -140,4 +141,4 @@ } } ] -} \ No newline at end of file +} From cc832412ebe5f85ce347d5ddb82660511cdee590 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:11:02 +0200 Subject: [PATCH 064/195] Update devopsbest.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/devopsbest.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/devopsbest.json b/frameworks/devopsbest.json index f663274b3..daaa2212e 100644 --- a/frameworks/devopsbest.json +++ b/frameworks/devopsbest.json @@ -2,6 +2,7 @@ "name": "DevOpsBest", "description": "", "attributes": { + "armoBuiltin": true }, "scanningScope": { "matches": [ @@ -84,4 +85,4 @@ } } ] -} \ No newline at end of file +} From 65e6e5214c8f6cbc6491191452ecc1d3195c1cc0 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:11:11 +0200 Subject: [PATCH 065/195] Update mitre.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/mitre.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/mitre.json b/frameworks/mitre.json index 00b8ff94e..97f18650f 100644 --- a/frameworks/mitre.json +++ b/frameworks/mitre.json @@ -2,6 +2,7 @@ "name": "MITRE", "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", "attributes": { + "armoBuiltin": true }, "scanningScope": { "matches": [ @@ -174,4 +175,4 @@ } } ] -} \ No newline at end of file +} From 4857fe6d712fcfc405c6f26237b216156e7d7400 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:11:20 +0200 Subject: [PATCH 066/195] Update nsaframework.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/nsaframework.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/nsaframework.json b/frameworks/nsaframework.json index 09da0b0cf..68c17ef9c 100644 --- a/frameworks/nsaframework.json +++ b/frameworks/nsaframework.json @@ -2,6 +2,7 @@ "name": "NSA", "description": "Implement NSA security advices for K8s ", "attributes": { + "armoBuiltin": true }, "scanningScope": { "matches": [ @@ -156,4 +157,4 @@ } } ] -} \ No newline at end of file +} From f257d91741efa13dbb3e647135e04e093922950b Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:11:30 +0200 Subject: [PATCH 067/195] Update security.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/security.json | 1 + 1 file changed, 1 insertion(+) diff --git a/frameworks/security.json b/frameworks/security.json index 893c8c2bb..23a6b2765 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -2,6 +2,7 @@ "name": "security", "description": "Controls that are used to assess security threats.", "attributes": { + "armoBuiltin": true }, "typeTags": [ "security" From 79a944bcf6686007f8445f47ca84ba71885de089 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: 
Tue, 28 Nov 2023 14:11:40 +0200 Subject: [PATCH 068/195] Update workloadscan.json Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- frameworks/workloadscan.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frameworks/workloadscan.json b/frameworks/workloadscan.json index 74a031e9e..be2afbeab 100644 --- a/frameworks/workloadscan.json +++ b/frameworks/workloadscan.json @@ -2,6 +2,7 @@ "name": "WorkloadScan", "description": "Framework for scanning a workload", "attributes": { + "armoBuiltin": true }, "typeTags": [ "security" @@ -135,4 +136,4 @@ } } ] -} \ No newline at end of file +} From a8b22508f4ff0110c11e80956e6937c312cafeb7 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Tue, 28 Nov 2023 14:29:39 +0200 Subject: [PATCH 069/195] fix release --- .github/workflows/create-release.yaml | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml index 0230173a5..ff33e3ab8 100644 --- a/.github/workflows/create-release.yaml +++ b/.github/workflows/create-release.yaml @@ -16,24 +16,13 @@ env: REGO_ARTIFACT_PATH: release jobs: - # testing link checks - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - - name: Check links - uses: gaurav-nelson/github-action-markdown-link-check@5c5dfc0ac2e225883c0e5f03a85311ec2830d368 - with: - use-verbose-mode: 'yes' - # main job of testing and building the env. test_pr_checks: - needs: [markdown-link-check] permissions: pull-requests: write uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main with: - GO_VERSION: 1.19 + GO_VERSION: '1.20' BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... secrets: inherit @@ -62,7 +51,7 @@ jobs: - name: Set up Go uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 with: - go-version: 1.19 + go-version: '1.20' - name: Test Regoes working-directory: testrunner From 8175bb0bb0e8bd87129c9a03c5645b796bc20acd Mon Sep 17 00:00:00 2001 From: Ben Date: Wed, 29 Nov 2023 23:41:47 +0200 Subject: [PATCH 070/195] Initial SOC2 framework Signed-off-by: Ben --- frameworks/soc2.json | 58 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 frameworks/soc2.json diff --git a/frameworks/soc2.json b/frameworks/soc2.json new file mode 100644 index 000000000..f3b0ad327 --- /dev/null +++ b/frameworks/soc2.json @@ -0,0 +1,58 @@ +{ + "name": "SOC2", + "description": "SOC2 compliance related controls", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": ["compliance"], + "activeControls": [ + { + "controlID": "C-0260", + "patch": { + "name": "Firewall (CC6.1,CC6.6,CC7.2)", + "description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management.", + "long_description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. 
Firewall rules are reviewed at least annually by IT management.", + "remediation": "Define network policies for all workloads to protect unwanted access" + } + }, + { + "controlID": "C-0012", + "patch": { + "name": "Cryptographic key management - misplaced secrets (CC6.1,CC6.6,CC6.7)", + "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel." + } + }, + { + "controlID": "C-0186", + "patch": { + "name": "Cryptographic key management - minimize access to secrets (CC6.1,CC6.6,CC6.7)", + "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel." + } + }, + { + "controlID": "C-0035", + "patch": { + "name": "Access restriction to infrastructure - admin access (CC6.1 ,CC6.2, CC6.7, CC6.8)", + "description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", + "long_description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities." + } + }, + { + "controlID": "C-0067", + "patch": { + "name": "Event logging (CC6.8,CC7.1,CC7.2)", + "description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers.", + "long_description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers: - Logon attempts - Data deletions - Application and system errors - Changes to software and configuration settings - Changes to system files, configuration files or content files The logs are monitored by IT Operations staff and significant issues are investigated and resolved within a timely manner." 
+ } + } + + ] +} From 9e44a41a76699be4b43a20e1618706872c988ff2 Mon Sep 17 00:00:00 2001 From: Ben Date: Thu, 30 Nov 2023 14:46:47 +0200 Subject: [PATCH 071/195] C-0263 control for detecting unencrypted ingress Signed-off-by: Ben --- controls/C-0263-ingress-tls.json | 20 ++++++++++++++ frameworks/soc2.json | 8 ++++++ rules/ingress-no-tls/raw.rego | 22 ++++++++++++++++ rules/ingress-no-tls/rule.metadata.json | 22 ++++++++++++++++ .../test/failed_with_ingress/expected.json | 26 +++++++++++++++++++ .../failed_with_ingress/input/ingress.yaml | 18 +++++++++++++ .../test/success_with_ingress/expected.json | 1 + .../success_with_ingress/input/ingress.yaml | 23 ++++++++++++++++ 8 files changed, 140 insertions(+) create mode 100644 controls/C-0263-ingress-tls.json create mode 100644 rules/ingress-no-tls/raw.rego create mode 100644 rules/ingress-no-tls/rule.metadata.json create mode 100644 rules/ingress-no-tls/test/failed_with_ingress/expected.json create mode 100644 rules/ingress-no-tls/test/failed_with_ingress/input/ingress.yaml create mode 100644 rules/ingress-no-tls/test/success_with_ingress/expected.json create mode 100644 rules/ingress-no-tls/test/success_with_ingress/input/ingress.yaml diff --git a/controls/C-0263-ingress-tls.json b/controls/C-0263-ingress-tls.json new file mode 100644 index 000000000..ce6b724f5 --- /dev/null +++ b/controls/C-0263-ingress-tls.json @@ -0,0 +1,20 @@ +{ + "name": "Ingress uses TLS", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control detect Ingress resources that do not use TLS", + "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", + "rulesNames": ["ingress-no-tls"], + "test": "Check if the Ingress resource has TLS configured", + "controlID": "C-0263", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster","file" + ] + } +} diff --git a/frameworks/soc2.json b/frameworks/soc2.json index f3b0ad327..667c4807b 100644 --- a/frameworks/soc2.json +++ b/frameworks/soc2.json @@ -52,6 +52,14 @@ "description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers.", "long_description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers: - Logon attempts - Data deletions - Application and system errors - Changes to software and configuration settings - Changes to system files, configuration files or content files The logs are monitored by IT Operations staff and significant issues are investigated and resolved within a timely manner." } + }, + { + "controlID": "C-0263", + "patch": { + "name": "Data in motion encryption - Ingress is TLS encrypted (CC6.1,CC6.6,CC6.7)", + "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", + "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." 
+ } } ] } diff --git a/rules/ingress-no-tls/raw.rego b/rules/ingress-no-tls/raw.rego new file mode 100644 index 000000000..769e2cea9 --- /dev/null +++ b/rules/ingress-no-tls/raw.rego @@ -0,0 +1,22 @@ +package armo_builtins + +# Fails if an Ingress resource does not have TLS configured +deny[msga] { + ingress := input[_] + ingress.kind == "Ingress" + + # Check if ingress has TLS enabled + not ingress.spec.tls + + msga := { + "alertMessage": sprintf("Ingress '%v' has no TLS definition", [ingress.metadata.name]), + "packagename": "armo_builtins", + "failedPaths": [], + "fixPaths": [{ + "path": "spec.tls", + "value": "<your-tls-definition>" + }], + "alertScore": 7, + "alertObject": {"k8sApiObjects": [ingress]} + } +} diff --git a/rules/ingress-no-tls/rule.metadata.json b/rules/ingress-no-tls/rule.metadata.json new file mode 100644 index 000000000..0afd57d8f --- /dev/null +++ b/rules/ingress-no-tls/rule.metadata.json @@ -0,0 +1,22 @@ +{ + "name": "ingress-no-tls", + "attributes": { + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "Ingress should not be configured without TLS", + "remediation": "", + "ruleQuery": "armo_builtins" +} diff --git a/rules/ingress-no-tls/test/failed_with_ingress/expected.json b/rules/ingress-no-tls/test/failed_with_ingress/expected.json new file mode 100644 index 000000000..18e7b81ff --- /dev/null +++ b/rules/ingress-no-tls/test/failed_with_ingress/expected.json @@ -0,0 +1,26 @@ +[ + { + "alertMessage": "Ingress 'my-ingress' has no TLS definition", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.tls", + "value": "\u003cyour-tls-definition\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "networking.k8s.io/v1", + "kind": "Ingress", + "metadata": { + "name": "my-ingress" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/ingress-no-tls/test/failed_with_ingress/input/ingress.yaml b/rules/ingress-no-tls/test/failed_with_ingress/input/ingress.yaml new file mode 100644 index 000000000..4cc9b174d --- /dev/null +++ b/rules/ingress-no-tls/test/failed_with_ingress/input/ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: my-ingress + namespace: default +spec: + ingressClassName: nginx + rules: + - host: myservicea.foo.org + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: my-service + port: + number: 80 diff --git a/rules/ingress-no-tls/test/success_with_ingress/expected.json b/rules/ingress-no-tls/test/success_with_ingress/expected.json new file mode 100644 index 000000000..fe51488c7 --- /dev/null +++ b/rules/ingress-no-tls/test/success_with_ingress/expected.json @@ -0,0 +1 @@ +[] diff --git a/rules/ingress-no-tls/test/success_with_ingress/input/ingress.yaml b/rules/ingress-no-tls/test/success_with_ingress/input/ingress.yaml new file mode 100644 index 000000000..bc34f9984 --- /dev/null +++ b/rules/ingress-no-tls/test/success_with_ingress/input/ingress.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: example-ingress-tls + namespace: default + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + tls: + - hosts: + - example.com + secretName: example-tls-secret + rules: + - host: example.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: 
example-service + port: + number: 80 From 13a78b00361a68fc6e511b3a1bbcb26c40df1cff Mon Sep 17 00:00:00 2001 From: Ben Date: Thu, 30 Nov 2023 15:41:10 +0200 Subject: [PATCH 072/195] C-0264 - detecting unencrypted volumes Signed-off-by: Ben --- controls/C-0264-pv-encrypted.json | 20 ++++++++ frameworks/soc2.json | 9 ++++ rules/pv-without-encryption/raw.rego | 46 +++++++++++++++++++ .../pv-without-encryption/rule.metadata.json | 33 +++++++++++++ .../test/aks/expected.json | 1 + .../test/aks/input/pv.yaml | 11 +++++ .../test/aks/input/sc.yaml | 9 ++++ .../test/eks/expected.json | 1 + .../test/eks/input/pv.yaml | 11 +++++ .../test/eks/input/sc.yaml | 10 ++++ .../test/fail/expected.json | 1 + .../test/fail/input/pv.yaml | 11 +++++ .../test/fail/input/sc.yaml | 11 +++++ .../test/gke/expected.json | 1 + .../test/gke/input/pv.yaml | 11 +++++ .../test/gke/input/sc.yaml | 9 ++++ 16 files changed, 195 insertions(+) create mode 100644 controls/C-0264-pv-encrypted.json create mode 100644 rules/pv-without-encryption/raw.rego create mode 100644 rules/pv-without-encryption/rule.metadata.json create mode 100644 rules/pv-without-encryption/test/aks/expected.json create mode 100644 rules/pv-without-encryption/test/aks/input/pv.yaml create mode 100644 rules/pv-without-encryption/test/aks/input/sc.yaml create mode 100644 rules/pv-without-encryption/test/eks/expected.json create mode 100644 rules/pv-without-encryption/test/eks/input/pv.yaml create mode 100644 rules/pv-without-encryption/test/eks/input/sc.yaml create mode 100644 rules/pv-without-encryption/test/fail/expected.json create mode 100644 rules/pv-without-encryption/test/fail/input/pv.yaml create mode 100644 rules/pv-without-encryption/test/fail/input/sc.yaml create mode 100644 rules/pv-without-encryption/test/gke/expected.json create mode 100644 rules/pv-without-encryption/test/gke/input/pv.yaml create mode 100644 rules/pv-without-encryption/test/gke/input/sc.yaml diff --git a/controls/C-0264-pv-encrypted.json b/controls/C-0264-pv-encrypted.json new file mode 100644 index 000000000..e13131027 --- /dev/null +++ b/controls/C-0264-pv-encrypted.json @@ -0,0 +1,20 @@ +{ + "name": "PersistentVolume without encryption", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control detects PersistentVolumes without encryption", + "remediation": "Enable encryption on the PersistentVolume using the configuration in StorageClass", + "rulesNames": ["pv-without-encryption"], + "test": "Checking all PersistentVolumes via their StorageClass for encryption", + "controlID": "C-0264", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } +} diff --git a/frameworks/soc2.json b/frameworks/soc2.json index 667c4807b..03aa66125 100644 --- a/frameworks/soc2.json +++ b/frameworks/soc2.json @@ -60,7 +60,16 @@ "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server."
} +}, + { + "controlID": "C-0264", + "patch": { + "name": "Data at rest encryption - Persistent Volumes are encrypted (CC1.1,CC6.7)", + "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", + "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." + } + } + + ] } diff --git a/rules/pv-without-encryption/raw.rego b/rules/pv-without-encryption/raw.rego new file mode 100644 index 000000000..96b62c375 --- /dev/null +++ b/rules/pv-without-encryption/raw.rego @@ -0,0 +1,46 @@ +package armo_builtins + +# Fails if a PersistentVolume uses a StorageClass that does not enable encryption +deny[msga] { + pv := input[_] + pv.kind == "PersistentVolume" + + # Find the related storage class + storageclass := input[_] + storageclass.kind == "StorageClass" + pv.spec.storageClassName == storageclass.metadata.name + + # Check if storage class is encrypted + not is_storage_class_encrypted(storageclass) + + msga := { + "alertMessage": sprintf("Volume '%v' is using a storage class that does not use encryption", [pv.metadata.name]), + "packagename": "armo_builtins", + "failedPaths": [], + "fixPaths": [{ + "path": "pv.spec.storageClassName", + "value": "<your encrypted storage class>" + }], + "alertScore": 7, + "alertObject": {"k8sApiObjects": [pv]} + } +} + +# Storage class is encrypted - AWS +is_storage_class_encrypted(storageclass) { + storageclass.parameters.encrypted == "true" +} + +# Storage class is encrypted - Azure +is_storage_class_encrypted(storageclass) { + storageclass.provisioner + contains(storageclass.provisioner,"azure") +} + +# Storage class is encrypted - GCP +is_storage_class_encrypted(storageclass) { + # GKE encryption is enabled by default https://cloud.google.com/blog/products/containers-kubernetes/exploring-container-security-use-your-own-keys-to-protect-your-data-on-gke + storageclass.provisioner + contains(storageclass.provisioner,"csi.storage.gke.io") +} + diff --git a/rules/pv-without-encryption/rule.metadata.json b/rules/pv-without-encryption/rule.metadata.json new file mode 100644 index 000000000..23453c4ca --- /dev/null +++ b/rules/pv-without-encryption/rule.metadata.json @@ -0,0 +1,33 @@ +{ + "name": "pv-without-encryption", + "attributes": { + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PersistentVolume" + ] + }, + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "StorageClass" + ] + } + ], + "description": "PersistentVolume without encryption", + "remediation": "", + "ruleQuery": "armo_builtins" +} diff --git a/rules/pv-without-encryption/test/aks/expected.json b/rules/pv-without-encryption/test/aks/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/pv-without-encryption/test/aks/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/pv-without-encryption/test/aks/input/pv.yaml b/rules/pv-without-encryption/test/aks/input/pv.yaml new file mode 100644 index 000000000..d1d8beb89 --- /dev/null +++ b/rules/pv-without-encryption/test/aks/input/pv.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pvc-0eeeeefe-5193-472c-a81e-104f3919130e +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 40Gi + persistentVolumeReclaimPolicy: Retain + storageClassName: azure-disk-cmk \ No
newline at end of file diff --git a/rules/pv-without-encryption/test/aks/input/sc.yaml b/rules/pv-without-encryption/test/aks/input/sc.yaml new file mode 100644 index 000000000..3dfeb0ea0 --- /dev/null +++ b/rules/pv-without-encryption/test/aks/input/sc.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: azure-disk-cmk +provisioner: kubernetes.io/azure-disk +parameters: + skuname: Standard_LRS + kind: Managed + diskEncryptionSetID: /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSet-name} diff --git a/rules/pv-without-encryption/test/eks/expected.json b/rules/pv-without-encryption/test/eks/expected.json new file mode 100644 index 000000000..2654377a9 --- /dev/null +++ b/rules/pv-without-encryption/test/eks/expected.json @@ -0,0 +1 @@ +[{"alertMessage":"Volume 'pvc-0eeeeefe-5193-472c-a81e-104f3919130e' is using a storage class that does not use encryption","failedPaths":[],"fixPaths":[{"path":"pv.spec.storageClassName","value":"\u003cyour encrypted storage class\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":7,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"PersistentVolume","metadata":{"name":"pvc-0eeeeefe-5193-472c-a81e-104f3919130e"}}]}}] \ No newline at end of file diff --git a/rules/pv-without-encryption/test/eks/input/pv.yaml b/rules/pv-without-encryption/test/eks/input/pv.yaml new file mode 100644 index 000000000..42efa09f0 --- /dev/null +++ b/rules/pv-without-encryption/test/eks/input/pv.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pvc-0eeeeefe-5193-472c-a81e-104f3919130e +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 40Gi + persistentVolumeReclaimPolicy: Retain + storageClassName: gp3retain \ No newline at end of file diff --git a/rules/pv-without-encryption/test/eks/input/sc.yaml b/rules/pv-without-encryption/test/eks/input/sc.yaml new file mode 100644 index 000000000..f6fa35be8 --- /dev/null +++ b/rules/pv-without-encryption/test/eks/input/sc.yaml @@ -0,0 +1,10 @@ +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gp3retain +parameters: + type: gp3 +provisioner: ebs.csi.aws.com +reclaimPolicy: Retain +volumeBindingMode: WaitForFirstConsumer \ No newline at end of file diff --git a/rules/pv-without-encryption/test/fail/expected.json b/rules/pv-without-encryption/test/fail/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/pv-without-encryption/test/fail/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/pv-without-encryption/test/fail/input/pv.yaml b/rules/pv-without-encryption/test/fail/input/pv.yaml new file mode 100644 index 000000000..42efa09f0 --- /dev/null +++ b/rules/pv-without-encryption/test/fail/input/pv.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pvc-0eeeeefe-5193-472c-a81e-104f3919130e +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 40Gi + persistentVolumeReclaimPolicy: Retain + storageClassName: gp3retain \ No newline at end of file diff --git a/rules/pv-without-encryption/test/fail/input/sc.yaml b/rules/pv-without-encryption/test/fail/input/sc.yaml new file mode 100644 index 000000000..d60feef9f --- /dev/null +++ b/rules/pv-without-encryption/test/fail/input/sc.yaml @@ -0,0 +1,11 @@ +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gp3retain 
+parameters: + encrypted: "true" + type: gp3 +provisioner: ebs.csi.aws.com +reclaimPolicy: Retain +volumeBindingMode: WaitForFirstConsumer \ No newline at end of file diff --git a/rules/pv-without-encryption/test/gke/expected.json b/rules/pv-without-encryption/test/gke/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/pv-without-encryption/test/gke/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/pv-without-encryption/test/gke/input/pv.yaml b/rules/pv-without-encryption/test/gke/input/pv.yaml new file mode 100644 index 000000000..ac9a1bc67 --- /dev/null +++ b/rules/pv-without-encryption/test/gke/input/pv.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pvc-0eeeeefe-5193-472c-a81e-104f3919130e +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 40Gi + persistentVolumeReclaimPolicy: Retain + storageClassName: standard-cmek \ No newline at end of file diff --git a/rules/pv-without-encryption/test/gke/input/sc.yaml b/rules/pv-without-encryption/test/gke/input/sc.yaml new file mode 100644 index 000000000..7242e5664 --- /dev/null +++ b/rules/pv-without-encryption/test/gke/input/sc.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: standard-cmek +provisioner: pd.csi.storage.gke.io +parameters: + type: pd-standard + csi.storage.k8s.io/fstype: ext4 + pdName: projects/my-project/locations/global/keyRings/my-keyring/cryptoKeys/my-key From c010f45723ea2528ec6a3a072fddabb73d55240e Mon Sep 17 00:00:00 2001 From: kooomix Date: Mon, 11 Dec 2023 08:58:01 +0200 Subject: [PATCH 073/195] fix control C-0004 Signed-off-by: kooomix --- .../raw.rego | 18 +++++++++++------- .../test/pod_pass/data.json | 6 ++++++ .../test/pod_pass/expected.json | 1 + .../test/pod_pass/input/pod.yaml | 15 +++++++++++++++ 4 files changed, 33 insertions(+), 7 deletions(-) create mode 100644 rules/resources-memory-limit-and-request/test/pod_pass/data.json create mode 100644 rules/resources-memory-limit-and-request/test/pod_pass/expected.json create mode 100644 rules/resources-memory-limit-and-request/test/pod_pass/input/pod.yaml diff --git a/rules/resources-memory-limit-and-request/raw.rego b/rules/resources-memory-limit-and-request/raw.rego index 799c80831..c33c03c4e 100644 --- a/rules/resources-memory-limit-and-request/raw.rego +++ b/rules/resources-memory-limit-and-request/raw.rego @@ -296,6 +296,7 @@ is_min_request_exceeded_memory(memory_req) { compare_min(memory_req_min, memory_req) } + ############## # helpers @@ -305,7 +306,7 @@ compare_max(max, given) { endswith(given, "Mi") split_max := split(max, "Mi")[0] split_given := split(given, "Mi")[0] - split_given > split_max + to_number(split_given) > to_number(split_max) } compare_max(max, given) { @@ -313,7 +314,7 @@ compare_max(max, given) { endswith(given, "M") split_max := split(max, "M")[0] split_given := split(given, "M")[0] - split_given > split_max + to_number(split_given) > to_number(split_max) } compare_max(max, given) { @@ -321,7 +322,7 @@ compare_max(max, given) { endswith(given, "m") split_max := split(max, "m")[0] split_given := split(given, "m")[0] - split_given > split_max + to_number(split_given) > to_number(split_max) } compare_max(max, given) { @@ -337,7 +338,7 @@ compare_min(min, given) { endswith(given, "Mi") split_min := split(min, "Mi")[0] split_given := split(given, "Mi")[0] - split_given < split_min + to_number(split_given) < to_number(split_min) } compare_min(min, given) { @@ -345,7 +346,8 @@ compare_min(min, 
given) { endswith(given, "M") split_min := split(min, "M")[0] split_given := split(given, "M")[0] - split_given < split_min + to_number(split_given) < to_number(split_min) + } compare_min(min, given) { @@ -353,13 +355,15 @@ compare_min(min, given) { endswith(given, "m") split_min := split(min, "m")[0] split_given := split(given, "m")[0] - split_given < split_min + to_number(split_given) < to_number(split_min) + } compare_min(min, given) { not is_special_measure(min) not is_special_measure(given) - given < min + to_number(given) < to_number(min) + } # Check that is same unit diff --git a/rules/resources-memory-limit-and-request/test/pod_pass/data.json b/rules/resources-memory-limit-and-request/test/pod_pass/data.json new file mode 100644 index 000000000..7fc81fd94 --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod_pass/data.json @@ -0,0 +1,6 @@ +{ + "postureControlInputs": { + "memory_limit_max": ["256Mi"], + "memory_request_max": ["128Mi"] + } +} \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/pod_pass/expected.json b/rules/resources-memory-limit-and-request/test/pod_pass/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod_pass/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/resources-memory-limit-and-request/test/pod_pass/input/pod.yaml b/rules/resources-memory-limit-and-request/test/pod_pass/input/pod.yaml new file mode 100644 index 000000000..e84566463 --- /dev/null +++ b/rules/resources-memory-limit-and-request/test/pod_pass/input/pod.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" \ No newline at end of file From 52c2791ccb804a357b0fab4822b9225488a3d512 Mon Sep 17 00:00:00 2001 From: kooomix Date: Wed, 13 Dec 2023 11:17:52 +0200 Subject: [PATCH 074/195] fix control C-0050 Signed-off-by: kooomix --- rules/resources-cpu-limit-and-request/raw.rego | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/rules/resources-cpu-limit-and-request/raw.rego b/rules/resources-cpu-limit-and-request/raw.rego index 2baf4abb2..c3b9672cf 100644 --- a/rules/resources-cpu-limit-and-request/raw.rego +++ b/rules/resources-cpu-limit-and-request/raw.rego @@ -350,7 +350,7 @@ compare_max(max, given) { endswith(given, "Mi") split_max := split(max, "Mi")[0] split_given := split(given, "Mi")[0] - split_given > split_max + to_number(split_given) > to_number(split_max) } compare_max(max, given) { @@ -358,7 +358,7 @@ compare_max(max, given) { endswith(given, "M") split_max := split(max, "M")[0] split_given := split(given, "M")[0] - split_given > split_max + to_number(split_given) > to_number(split_max) } compare_max(max, given) { @@ -366,13 +366,13 @@ compare_max(max, given) { endswith(given, "m") split_max := split(max, "m")[0] split_given := split(given, "m")[0] - split_given > split_max + to_number(split_given) > to_number(split_max) } compare_max(max, given) { not is_special_measure(max) not is_special_measure(given) - given > max + to_number(given) > to_number(max) } @@ -384,7 +384,7 @@ compare_min(min, given) { endswith(given, "Mi") split_min := split(min, "Mi")[0] split_given := split(given, "Mi")[0] - split_given < split_min + to_number(split_given) < to_number(split_min) } compare_min(min, given) { @@ -392,7 
+392,7 @@ compare_min(min, given) { endswith(given, "M") split_min := split(min, "M")[0] split_given := split(given, "M")[0] - split_given < split_min + to_number(split_given) < to_number(split_min) } compare_min(min, given) { @@ -400,13 +400,15 @@ compare_min(min, given) { endswith(given, "m") split_min := split(min, "m")[0] split_given := split(given, "m")[0] - split_given < split_min + to_number(split_given) < to_number(split_min) + } compare_min(min, given) { not is_special_measure(min) not is_special_measure(given) - given < min + to_number(given) < to_number(min) + } From 8b957133a6e9fe90e4983d8f5774fe3f0cafb415 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Wed, 13 Dec 2023 15:52:01 +0200 Subject: [PATCH 075/195] removing control 0086 + CVE 2022 0492 --- ControlID_RuleName.csv | 1 - FWName_CID_CName.csv | 2 - ...086-cve20220492cgroupscontainerescape.json | 27 -- frameworks/__YAMLscan.json | 1 - frameworks/allcontrols.json | 6 - frameworks/armobest.json | 6 - rules/CVE-2022-0492/raw.rego | 377 ------------------ rules/CVE-2022-0492/rule.metadata.json | 50 --- .../test/ca_dac_override_pass/expected.json | 1 - .../ca_dac_override_pass/input/deploy.yaml | 68 ---- .../test/cap_dac_override_fail/expected.json | 21 - .../cap_dac_override_fail/input/deploy.yaml | 71 ---- .../test/no_new_privs_fail/expected.json | 24 -- .../test/no_new_privs_fail/input/cronjob.yaml | 19 - .../test/no_new_privs_pass/expected.json | 1 - .../test/no_new_privs_pass/input/cronjob.yaml | 21 - .../test/root_user_fail/expected.json | 18 - .../test/root_user_fail/input/pod.yaml | 12 - .../test/root_user_pass/expected.json | 1 - .../test/root_user_pass/input/pod.yaml | 15 - 20 files changed, 742 deletions(-) delete mode 100644 controls/C-0086-cve20220492cgroupscontainerescape.json delete mode 100644 rules/CVE-2022-0492/raw.rego delete mode 100644 rules/CVE-2022-0492/rule.metadata.json delete mode 100644 rules/CVE-2022-0492/test/ca_dac_override_pass/expected.json delete mode 100644 rules/CVE-2022-0492/test/ca_dac_override_pass/input/deploy.yaml delete mode 100644 rules/CVE-2022-0492/test/cap_dac_override_fail/expected.json delete mode 100644 rules/CVE-2022-0492/test/cap_dac_override_fail/input/deploy.yaml delete mode 100644 rules/CVE-2022-0492/test/no_new_privs_fail/expected.json delete mode 100644 rules/CVE-2022-0492/test/no_new_privs_fail/input/cronjob.yaml delete mode 100644 rules/CVE-2022-0492/test/no_new_privs_pass/expected.json delete mode 100644 rules/CVE-2022-0492/test/no_new_privs_pass/input/cronjob.yaml delete mode 100644 rules/CVE-2022-0492/test/root_user_fail/expected.json delete mode 100644 rules/CVE-2022-0492/test/root_user_fail/input/pod.yaml delete mode 100644 rules/CVE-2022-0492/test/root_user_pass/expected.json delete mode 100644 rules/CVE-2022-0492/test/root_user_pass/input/pod.yaml diff --git a/ControlID_RuleName.csv b/ControlID_RuleName.csv index 51235bb80..2cfb21a35 100644 --- a/ControlID_RuleName.csv +++ b/ControlID_RuleName.csv @@ -77,7 +77,6 @@ C-0082,read-only-port-enabled C-0083,exposed-critical-pods C-0084,exposed-rce-pods C-0085,excessive_amount_of_vulnerabilities_pods -C-0086,CVE-2022-0492 C-0087,CVE-2022-23648 C-0088,rbac-enabled-cloud C-0088,rbac-enabled-native diff --git a/FWName_CID_CName.csv b/FWName_CID_CName.csv index 44a74aa54..c9e84ed77 100644 --- a/FWName_CID_CName.csv +++ b/FWName_CID_CName.csv @@ -55,7 +55,6 @@ AllControls,C-0077,K8s common labels usage AllControls,C-0078,Images from allowed registry AllControls,C-0079,CVE-2022-0185-linux-kernel-container-escape 
AllControls,C-0081,CVE-2022-24348-argocddirtraversal -AllControls,C-0086,CVE-2022-0492-cgroups-container-escape AllControls,C-0087,CVE-2022-23648-containerd-fs-escape AllControls,C-0088,RBAC enabled AllControls,C-0090,CVE-2022-39328-grafana-auth-bypass @@ -93,7 +92,6 @@ ArmoBest,C-0070,Enforce Kubelet client TLS authentication ArmoBest,C-0078,Images from allowed registry ArmoBest,C-0079,CVE-2022-0185-linux-kernel-container-escape ArmoBest,C-0081,CVE-2022-24348-argocddirtraversal -ArmoBest,C-0086,CVE-2022-0492-cgroups-container-escape ArmoBest,C-0087,CVE-2022-23648-containerd-fs-escape ArmoBest,C-0089,CVE-2022-3172-aggregated-API-server-redirect ArmoBest,C-0091,CVE-2022-47633-kyverno-signature-bypass diff --git a/controls/C-0086-cve20220492cgroupscontainerescape.json b/controls/C-0086-cve20220492cgroupscontainerescape.json deleted file mode 100644 index 51772380a..000000000 --- a/controls/C-0086-cve20220492cgroupscontainerescape.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "rulesNames": [ - "CVE-2022-0492" - ], - "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system. This control identifies all the resources that don't deploy niether AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. 
In case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", - "controlID": "C-0086", - "baseScore": 4.0, - "example": "", - "category": { - "name" : "Workload" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - } -} \ No newline at end of file diff --git a/frameworks/__YAMLscan.json b/frameworks/__YAMLscan.json index d7da504d7..f719a0154 100644 --- a/frameworks/__YAMLscan.json +++ b/frameworks/__YAMLscan.json @@ -58,6 +58,5 @@ "K8s common labels usage", "Images from allowed registry", "CVE-2022-24348-argocddirtraversal", - "CVE-2022-0492-cgroups-container-escape" ] } diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index f27692d1b..f8f28441c 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -348,12 +348,6 @@ "name": "CVE-2022-24348-argocddirtraversal" } }, - { - "controlID": "C-0086", - "patch": { - "name": "CVE-2022-0492-cgroups-container-escape" - } - }, { "controlID": "C-0087", "patch": { diff --git a/frameworks/armobest.json b/frameworks/armobest.json index 811f37f32..158dfba39 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -210,12 +210,6 @@ "name": "CVE-2022-24348-argocddirtraversal" } }, - { - "controlID": "C-0086", - "patch": { - "name": "CVE-2022-0492-cgroups-container-escape" - } - }, { "controlID": "C-0087", "patch": { diff --git a/rules/CVE-2022-0492/raw.rego b/rules/CVE-2022-0492/raw.rego deleted file mode 100644 index 13fd8f7f3..000000000 --- a/rules/CVE-2022-0492/raw.rego +++ /dev/null @@ -1,377 +0,0 @@ -package armo_builtins - - -# Case 1: -# - Container runs as root OR allows privilege escalation (allowPrivilegeEscalation = true or not present), AND -# - No AppArmor , AND -# - No SELinux, AND -# - No Seccomp -# If container is privileged or has CAP_SYS_ADMIN, don't fail - -deny[msga] { - pod := input[_] - pod.kind == "Pod" - container := pod.spec.containers[i] - - # Path to send - start_of_path := "spec" - - # If container is privileged or has CAP_SYS_ADMIN, pass - not container.securityContext.privileged == true - not is_cap_sys_admin(container, start_of_path) - - - is_no_SELinux_No_AppArmor_Pod(pod) - is_no_seccomp_pod(pod) - - is_no_SELinux_container(container) - is_no_Seccomp_Container(container) - - # Check if is running as root - alertInfo := evaluate_workload_non_root_container(container, pod, start_of_path) - - # CAP_DAC_OVERRIDE will fail on second check - not isCAP_DAC_OVERRIDE(container, start_of_path, i) - - # Get paths - fixPath := get_fixed_path(alertInfo, i) - failed_path := get_failed_path(alertInfo, i) - - - msga := { - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "packagename": "armo_builtins", - "alertScore": 4, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixPath, - "alertObject": { - "k8sApiObjects": [pod] - } - } -} - -deny[msga] { - wl := input[_] - spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - spec_template_spec_patterns[wl.kind] - start_of_path := "spec.template.spec" - - pod := wl.spec.template - container := pod.spec.containers[i] - - # If container is privileged or has CAP_SYS_ADMIN, pass - not container.securityContext.privileged == true - not is_cap_sys_admin(container, start_of_path) - - - is_no_SELinux_No_AppArmor_Pod(pod) - is_no_seccomp_pod(pod) - - is_no_SELinux_container(container) - is_no_Seccomp_Container(container) - - # Check if is running as root - alertInfo := 
evaluate_workload_non_root_container(container, pod, start_of_path) - - # CAP_DAC_OVERRIDE will fail on second check - not isCAP_DAC_OVERRIDE(container, start_of_path, i) - - # Get paths - fixPath := get_fixed_path(alertInfo, i) - failed_path := get_failed_path(alertInfo, i) - - - msga := { - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "packagename": "armo_builtins", - "alertScore": 4, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixPath, - "alertObject": { - "k8sApiObjects": [wl] - } - } -} - -deny[msga] { - wl := input[_] - wl.kind == "CronJob" - start_of_path := "spec.jobTemplate.spec.template.spec" - - pod := wl.spec.jobTemplate.spec.template - container = pod.spec.containers[i] - - # If container is privileged or has CAP_SYS_ADMIN, pass - not container.securityContext.privileged == true - not is_cap_sys_admin(container, start_of_path) - - - is_no_SELinux_No_AppArmor_Pod(pod) - is_no_seccomp_pod(pod) - - is_no_SELinux_container(container) - is_no_Seccomp_Container(container) - - # Check if is running as root - alertInfo := evaluate_workload_non_root_container(container, pod, start_of_path) - - # CAP_DAC_OVERRIDE will fail on second check - not isCAP_DAC_OVERRIDE(container, start_of_path, i) - - # Get paths - fixPath := get_fixed_path(alertInfo, i) - failed_path := get_failed_path(alertInfo, i) - - msga := { - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "packagename": "armo_builtins", - "alertScore": 4, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixPath, - "alertObject": { - "k8sApiObjects": [wl] - } - } -} - - -################################################################################# -# Case 2: -# - Container has CAP_DAC_OVERRIDE capability, AND -# - No AppArmor, AND -# - No SELinux -# If container is privileged or has CAP_SYS_ADMIN, don't fail - -deny[msga] { - pod := input[_] - pod.kind == "Pod" - container := pod.spec.containers[i] - - start_of_path := "spec." - - result := isCAP_DAC_OVERRIDE(container, start_of_path, i) - - # If container is privileged or has CAP_SYS_ADMIN, pass - not container.securityContext.privileged == true - not is_cap_sys_admin(container, start_of_path) - - is_no_SELinux_No_AppArmor_Pod(pod) - is_no_SELinux_container(container) - - msga := { - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "packagename": "armo_builtins", - "alertScore": 4, - "reviewPaths": [result], - "failedPaths": [result], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [pod] - } - } -} - -deny[msga] { - wl := input[_] - spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - spec_template_spec_patterns[wl.kind] - - pod := wl.spec.template - container := pod.spec.containers[i] - - start_of_path := "spec.template.spec." 
- - result := isCAP_DAC_OVERRIDE(container, start_of_path, i) - - # If container is privileged or has CAP_SYS_ADMIN, pass - not container.securityContext.privileged == true - not is_cap_sys_admin(container, start_of_path) - - is_no_SELinux_No_AppArmor_Pod(pod) - is_no_SELinux_container(container) - - msga := { - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "packagename": "armo_builtins", - "alertScore": 4, - "reviewPaths": [result], - "failedPaths": [result], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [wl] - } - } -} - -deny[msga] { - wl := input[_] - wl.kind == "CronJob" - - pod := wl.spec.jobTemplate.spec.template - container = pod.spec.containers[i] - - start_of_path := "spec.jobTemplate.spec.template.spec." - - result := isCAP_DAC_OVERRIDE(container, start_of_path, i) - - # If container is privileged or has CAP_SYS_ADMIN, pass - not container.securityContext.privileged == true - not is_cap_sys_admin(container, start_of_path) - - is_no_SELinux_No_AppArmor_Pod(pod) - is_no_SELinux_container(container) - - msga := { - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "packagename": "armo_builtins", - "alertScore": 4, - "reviewPaths": [result], - "failedPaths": [result], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [wl] - } - } -} - - - - -is_cap_sys_admin(container, start_of_path) { - capability = container.securityContext.capabilities.add[k] - capability == "SYS_ADMIN" -} - -isCAP_DAC_OVERRIDE(container, start_of_path, i) = path { - capability = container.securityContext.capabilities.add[k] - capability == "DAC_OVERRIDE" - path = sprintf("%vcontainers[%v].securityContext.capabilities.add[%v]", [start_of_path, format_int(i, 10), format_int(k, 10)]) -} - - - -################################################################################# - -get_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,"container_ndx",format_int(i,10))] { - alertInfo.failed_path != "" -} else = [] - - -get_fixed_path(alertInfo, i) = [{"path":replace(alertInfo.fixPath[0].path,"container_ndx",format_int(i,10)), "value":alertInfo.fixPath[0].value}, {"path":replace(alertInfo.fixPath[1].path,"container_ndx",format_int(i,10)), "value":alertInfo.fixPath[1].value}]{ - count(alertInfo.fixPath) == 2 -} else = [{"path":replace(alertInfo.fixPath[0].path,"container_ndx",format_int(i,10)), "value":alertInfo.fixPath[0].value}] { - count(alertInfo.fixPath) == 1 -} else = [] - - - - - -################################################################################# - -# Check if appArmor or SELinux or seccompProfile is used -# Fails if none of them is used -is_no_SELinux_No_AppArmor_Pod(pod){ - not pod.spec.securityContext.seLinuxOptions - annotations := [pod.metadata.annotations[i] | annotaion = i; startswith(i, "container.apparmor.security.beta.kubernetes.io")] - not count(annotations) > 0 -} - -is_no_SELinux_container(container){ - not container.securityContext.seLinuxOptions -} - -is_no_seccomp_pod(pod) { - not pod.spec.securityContext.seccompProfile -} - -is_no_Seccomp_Container(container) { - not container.securityContext.seccompProfile -} - - - - - - -################################################################################# -# Workload evaluation - -evaluate_workload_non_root_container(container, pod, start_of_path) = alertInfo { - runAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path) - runAsNonRootValue.value == false - - runAsUserValue := get_run_as_user_value(container, pod, start_of_path) - runAsUserValue.value == 0 - - alertInfo 
:= choose_first_if_defined(runAsUserValue, runAsNonRootValue) -} else = alertInfo { - allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, start_of_path) - allowPrivilegeEscalationValue.value == true - - alertInfo := allowPrivilegeEscalationValue -} - - -################################################################################# - -# Checking for non-root and allowPrivilegeEscalation enabled -get_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]) - runAsNonRoot := {"value" : container.securityContext.runAsNonRoot, "failed_path" : failed_path, "fixPath": [] ,"defined" : true} -} else = runAsNonRoot { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]) - runAsNonRoot := {"value" : pod.spec.securityContext.runAsNonRoot, "failed_path" : failed_path, "fixPath": [], "defined" : true} -} else = {"value" : false, "failed_path" : "", "fixPath": [{"path": "spec.securityContext.runAsNonRoot", "value":"true"}], "defined" : false} { - is_allow_privilege_escalation_field(container, pod) -} else = {"value" : false, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]) , "value":"true"}, {"path":sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : false} - -get_run_as_user_value(container, pod, start_of_path) = runAsUser { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsUser", [start_of_path]) - runAsUser := {"value" : container.securityContext.runAsUser, "failed_path" : failed_path, "fixPath": [], "defined" : true} -} else = runAsUser { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsUser", [start_of_path]) - runAsUser := {"value" : pod.spec.securityContext.runAsUser, "failed_path" : failed_path, "fixPath": [],"defined" : true} -} else = {"value" : 0, "failed_path": "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}],"defined" : false}{ - is_allow_privilege_escalation_field(container, pod) -} else = {"value" : 0, "failed_path": "", - "fixPath": [{"path": sprintf("%v.securityContext.containers[container_ndx].runAsNonRoot", [start_of_path]), "value":"true"},{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], - "defined" : false} - -get_run_as_group_value(container, pod, start_of_path) = runAsGroup { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsGroup", [start_of_path]) - runAsGroup := {"value" : container.securityContext.runAsGroup, "failed_path" : failed_path, "fixPath": [],"defined" : true} -} else = runAsGroup { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsGroup", [start_of_path]) - runAsGroup := {"value" : pod.spec.securityContext.runAsGroup, "failed_path" : failed_path, "fixPath":[], "defined" : true} -} else = {"value" : 0, "failed_path": "", "fixPath": [{"path": "spec.securityContext.runAsNonRoot", "value":"true"}], "defined" : false}{ - is_allow_privilege_escalation_field(container, pod) -} else = {"value" : 0, "failed_path": "", - "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"},{"path": 
sprintf("%v.securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], - "defined" : false -} - -get_allow_privilege_escalation(container, pod, start_of_path) = allowPrivilegeEscalation { - failed_path := sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]) - allowPrivilegeEscalation := {"value" : container.securityContext.allowPrivilegeEscalation, "failed_path" : failed_path, "fixPath": [],"defined" : true} -} else = allowPrivilegeEscalation { - failed_path := sprintf("%v.securityContext.allowPrivilegeEscalation", [start_of_path]) - allowPrivilegeEscalation := {"value" : pod.spec.securityContext.allowPrivilegeEscalation, "failed_path" : failed_path, "fixPath": [],"defined" : true} -} else = {"value" : true, "failed_path": "", "fixPath": [{"path": sprintf("%v.securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : false} - -choose_first_if_defined(l1, l2) = c { - l1.defined - c := l1 -} else = l2 - - -is_allow_privilege_escalation_field(container, pod) { - container.securityContext.allowPrivilegeEscalation == false -} - -is_allow_privilege_escalation_field(container, pod) { - pod.spec.securityContext.allowPrivilegeEscalation == false -} diff --git a/rules/CVE-2022-0492/rule.metadata.json b/rules/CVE-2022-0492/rule.metadata.json deleted file mode 100644 index 31e1d6b06..000000000 --- a/rules/CVE-2022-0492/rule.metadata.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "name": "CVE-2022-0492", - "attributes": { - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins" -} \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/ca_dac_override_pass/expected.json b/rules/CVE-2022-0492/test/ca_dac_override_pass/expected.json deleted file mode 100644 index 0637a088a..000000000 --- a/rules/CVE-2022-0492/test/ca_dac_override_pass/expected.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/ca_dac_override_pass/input/deploy.yaml b/rules/CVE-2022-0492/test/ca_dac_override_pass/input/deploy.yaml deleted file mode 100644 index 181d1889d..000000000 --- a/rules/CVE-2022-0492/test/ca_dac_override_pass/input/deploy.yaml +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - deployment.kubernetes.io/revision: "1" - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"nginx"},"name":"nginx-deployment","namespace":"default"},"spec":{"replicas":3,"selector":{"matchLabels":{"app":"nginx"}},"template":{"metadata":{"labels":{"app":"nginx"}},"spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx","ports":[{"containerPort":80}]}],"securityContext":{"runAsNonRoot":false}}}}} - creationTimestamp: "2022-03-07T15:57:36Z" - generation: 1 - labels: - app: nginx - name: nginx-deployment - namespace: default - resourceVersion: "4416" - uid: 608c546a-e4e9-4665-baeb-4a70c09b6c8b -spec: - progressDeadlineSeconds: 600 - replicas: 3 - revisionHistoryLimit: 10 - selector: - matchLabels: - app: 
nginx - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - type: RollingUpdate - template: - metadata: - labels: - app: nginx - spec: - containers: - - image: nginx:1.14.2 - securityContext: - capabilities: - add: ["DAC_OVERRIDE", "SYS_ADMIN"] - imagePullPolicy: IfNotPresent - name: nginx - ports: - - containerPort: 80 - protocol: TCP - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - terminationGracePeriodSeconds: 30 -status: - availableReplicas: 3 - conditions: - - lastTransitionTime: "2022-03-07T15:57:38Z" - lastUpdateTime: "2022-03-07T15:57:38Z" - message: Deployment has minimum availability. - reason: MinimumReplicasAvailable - status: "True" - type: Available - - lastTransitionTime: "2022-03-07T15:57:36Z" - lastUpdateTime: "2022-03-07T15:57:38Z" - message: ReplicaSet "nginx-deployment-548f9774bc" has successfully progressed. - reason: NewReplicaSetAvailable - status: "True" - type: Progressing - observedGeneration: 1 - readyReplicas: 3 - replicas: 3 - updatedReplicas: 3 diff --git a/rules/CVE-2022-0492/test/cap_dac_override_fail/expected.json b/rules/CVE-2022-0492/test/cap_dac_override_fail/expected.json deleted file mode 100644 index e37846e08..000000000 --- a/rules/CVE-2022-0492/test/cap_dac_override_fail/expected.json +++ /dev/null @@ -1,21 +0,0 @@ -[{ - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "reviewPaths": ["spec.template.spec.containers[0].securityContext.capabilities.add[0]"], - "failedPaths": ["spec.template.spec.containers[0].securityContext.capabilities.add[0]"], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 4, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "app": "nginx" - }, - "name": "nginx-deployment" - } - }] - } -}] \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/cap_dac_override_fail/input/deploy.yaml b/rules/CVE-2022-0492/test/cap_dac_override_fail/input/deploy.yaml deleted file mode 100644 index f45d3a8da..000000000 --- a/rules/CVE-2022-0492/test/cap_dac_override_fail/input/deploy.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - deployment.kubernetes.io/revision: "1" - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"nginx"},"name":"nginx-deployment","namespace":"default"},"spec":{"replicas":3,"selector":{"matchLabels":{"app":"nginx"}},"template":{"metadata":{"labels":{"app":"nginx"}},"spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx","ports":[{"containerPort":80}]}],"securityContext":{"runAsNonRoot":false}}}}} - creationTimestamp: "2022-03-07T15:57:36Z" - generation: 1 - labels: - app: nginx - name: nginx-deployment - namespace: default - resourceVersion: "4416" - uid: 608c546a-e4e9-4665-baeb-4a70c09b6c8b -spec: - progressDeadlineSeconds: 600 - replicas: 3 - revisionHistoryLimit: 10 - selector: - matchLabels: - app: nginx - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - type: RollingUpdate - template: - metadata: - labels: - app: nginx - spec: - containers: - - image: nginx:1.14.2 - securityContext: - seccompProfile: - type: Localhost - localhostProfile: profiles/audit.json - capabilities: - add: ["DAC_OVERRIDE"] - imagePullPolicy: IfNotPresent - name: nginx - ports: - - containerPort: 80 
- protocol: TCP - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - terminationGracePeriodSeconds: 30 -status: - availableReplicas: 3 - conditions: - - lastTransitionTime: "2022-03-07T15:57:38Z" - lastUpdateTime: "2022-03-07T15:57:38Z" - message: Deployment has minimum availability. - reason: MinimumReplicasAvailable - status: "True" - type: Available - - lastTransitionTime: "2022-03-07T15:57:36Z" - lastUpdateTime: "2022-03-07T15:57:38Z" - message: ReplicaSet "nginx-deployment-548f9774bc" has successfully progressed. - reason: NewReplicaSetAvailable - status: "True" - type: Progressing - observedGeneration: 1 - readyReplicas: 3 - replicas: 3 - updatedReplicas: 3 diff --git a/rules/CVE-2022-0492/test/no_new_privs_fail/expected.json b/rules/CVE-2022-0492/test/no_new_privs_fail/expected.json deleted file mode 100644 index 2face1fb8..000000000 --- a/rules/CVE-2022-0492/test/no_new_privs_fail/expected.json +++ /dev/null @@ -1,24 +0,0 @@ -[{ - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "reviewPaths": [], - "failedPaths": [], - "fixPaths": [{ - "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsNonRoot", - "value": "true" - }, { - "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation", - "value": "false" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 4, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "batch/v1", - "kind": "CronJob", - "metadata": { - "name": "hello" - } - }] - } -}] \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/no_new_privs_fail/input/cronjob.yaml b/rules/CVE-2022-0492/test/no_new_privs_fail/input/cronjob.yaml deleted file mode 100644 index d3df84dc7..000000000 --- a/rules/CVE-2022-0492/test/no_new_privs_fail/input/cronjob.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: hello -spec: - schedule: "* * * * *" - jobTemplate: - spec: - template: - spec: - containers: - - name: hello - image: busybox - imagePullPolicy: IfNotPresent - command: - - /bin/sh - - -c - - date; echo Hello from the Kubernetes cluster - restartPolicy: OnFailure \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/no_new_privs_pass/expected.json b/rules/CVE-2022-0492/test/no_new_privs_pass/expected.json deleted file mode 100644 index 0637a088a..000000000 --- a/rules/CVE-2022-0492/test/no_new_privs_pass/expected.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/no_new_privs_pass/input/cronjob.yaml b/rules/CVE-2022-0492/test/no_new_privs_pass/input/cronjob.yaml deleted file mode 100644 index eef252e89..000000000 --- a/rules/CVE-2022-0492/test/no_new_privs_pass/input/cronjob.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: hello -spec: - schedule: "* * * * *" - jobTemplate: - spec: - template: - spec: - containers: - - name: hello - securityContext: - privileged: true - image: busybox - imagePullPolicy: IfNotPresent - command: - - /bin/sh - - -c - - date; echo Hello from the Kubernetes cluster - restartPolicy: OnFailure \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/root_user_fail/expected.json b/rules/CVE-2022-0492/test/root_user_fail/expected.json deleted file mode 100644 index 4ae347b67..000000000 --- a/rules/CVE-2022-0492/test/root_user_fail/expected.json +++ 
/dev/null @@ -1,18 +0,0 @@ -[{ - "alertMessage": "You may be vulnerable to CVE-2022-0492", - "reviewPaths": ["spec.containers[0].securityContext.runAsUser"], - "failedPaths": ["spec.containers[0].securityContext.runAsUser"], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 4, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "nginx" - } - }] - } -}] \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/root_user_fail/input/pod.yaml b/rules/CVE-2022-0492/test/root_user_fail/input/pod.yaml deleted file mode 100644 index 222e96e18..000000000 --- a/rules/CVE-2022-0492/test/root_user_fail/input/pod.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: nginx -spec: - containers: - - name: nginx - image: nginx:1.14.2 - securityContext: - runAsUser: 0 - ports: - - containerPort: 80 \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/root_user_pass/expected.json b/rules/CVE-2022-0492/test/root_user_pass/expected.json deleted file mode 100644 index 0637a088a..000000000 --- a/rules/CVE-2022-0492/test/root_user_pass/expected.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/rules/CVE-2022-0492/test/root_user_pass/input/pod.yaml b/rules/CVE-2022-0492/test/root_user_pass/input/pod.yaml deleted file mode 100644 index 4071cfc1f..000000000 --- a/rules/CVE-2022-0492/test/root_user_pass/input/pod.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: nginx -spec: - containers: - - name: nginx - image: nginx:1.14.2 - securityContext: - seccompProfile: - type: RuntimeDefault - runAsUser: 0 - allowPrivilegeEscalation: false - ports: - - containerPort: 80 \ No newline at end of file From 6360493cf735bea491548cd10eb685091a5670c9 Mon Sep 17 00:00:00 2001 From: Yuval Leibovich Date: Thu, 14 Dec 2023 09:11:36 +0200 Subject: [PATCH 076/195] updating script + fixing the issue --- .github/workflows/create-release.yaml | 2 +- .github/workflows/pr-tests.yaml | 2 +- .../workflows/push-releasedev-updates.yaml | 4 +- frameworks/__YAMLscan.json | 2 +- frameworks/cis-aks-t1.2.0.json | 16 +++---- frameworks/cis-eks-t1.2.0.json | 27 +++++++----- frameworks/cis-v1.23-t1.0.1.json | 34 +++++++-------- scripts/generate_subsections_ids.py | 42 ++++++++++++++----- scripts/generate_subsections_ids.sh | 19 --------- 9 files changed, 79 insertions(+), 69 deletions(-) delete mode 100755 scripts/generate_subsections_ids.sh diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml index ff33e3ab8..1cba49c17 100644 --- a/.github/workflows/create-release.yaml +++ b/.github/workflows/create-release.yaml @@ -66,7 +66,7 @@ jobs: # generating subsections ids - name: Update frameworks subsections - run: bash ./scripts/generate_subsections_ids.sh + run: python ./scripts/generate_subsections_ids.py # validate control-ID duplications - run: python ./scripts/validations.py diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 760c831aa..57a7397bb 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -80,7 +80,7 @@ jobs: # generating subsections ids - name: Update frameworks subsections - run: bash ./scripts/generate_subsections_ids.sh + run: python ./scripts/generate_subsections_ids.py # run export script to generate regolibrary artifacts # releaseDev clean up is for old compatability. should be removed at end of 2023. 
diff --git a/.github/workflows/push-releasedev-updates.yaml b/.github/workflows/push-releasedev-updates.yaml index 94bcd7dcd..b722a6dfe 100644 --- a/.github/workflows/push-releasedev-updates.yaml +++ b/.github/workflows/push-releasedev-updates.yaml @@ -2,13 +2,13 @@ name: Push to regolibrary-dev on: push: - branches: [master, main] + branches: [master] jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.head_ref }} fetch-depth: 0 diff --git a/frameworks/__YAMLscan.json b/frameworks/__YAMLscan.json index f719a0154..2e05517ce 100644 --- a/frameworks/__YAMLscan.json +++ b/frameworks/__YAMLscan.json @@ -57,6 +57,6 @@ "Label usage for resources", "K8s common labels usage", "Images from allowed registry", - "CVE-2022-24348-argocddirtraversal", + "CVE-2022-24348-argocddirtraversal" ] } diff --git a/frameworks/cis-aks-t1.2.0.json b/frameworks/cis-aks-t1.2.0.json index 21b6e6583..9fc2300bf 100644 --- a/frameworks/cis-aks-t1.2.0.json +++ b/frameworks/cis-aks-t1.2.0.json @@ -10,7 +10,9 @@ "AKS" ] }, - "typeTags": ["compliance"], + "typeTags": [ + "compliance" + ], "activeControls": [ { "controlID": "C-0078", @@ -549,15 +551,15 @@ "id": "3.2", "controlsIDs": [ "C-0172", - "C-0175", - "C-0179", - "C-0182", "C-0173", "C-0174", + "C-0175", "C-0176", "C-0177", "C-0178", + "C-0179", "C-0180", + "C-0182", "C-0183" ] } @@ -602,8 +604,8 @@ "name": "CNI Plugin", "id": "4.4", "controlsIDs": [ - "C-0206", - "C-0205" + "C-0205", + "C-0206" ] }, "5": { @@ -688,4 +690,4 @@ } } } -} +} \ No newline at end of file diff --git a/frameworks/cis-eks-t1.2.0.json b/frameworks/cis-eks-t1.2.0.json index 1fb2123ea..f9e23806a 100644 --- a/frameworks/cis-eks-t1.2.0.json +++ b/frameworks/cis-eks-t1.2.0.json @@ -2,7 +2,7 @@ "name": "cis-eks-t1.2.0", "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", "attributes": { - "armoBuiltin": true, + "armoBuiltin": true, "version": "v1.2.0" }, "scanningScope": { @@ -10,7 +10,9 @@ "EKS" ] }, - "typeTags": ["compliance"], + "typeTags": [ + "compliance" + ], "activeControls": [ { "controlID": "C-0066", @@ -499,7 +501,7 @@ "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive" } }, - { + { "controlID": "C-0242", "patch": { "name": "CIS-5.6.2 Hostile multi-tenant workloads" @@ -509,8 +511,8 @@ "controlID": "C-0246", "patch": { "name": "CIS-4.1.7 Avoid use of system:masters group" - } - } + } + } ], "subSections": { "2": { @@ -580,7 +582,8 @@ "C-0188", "C-0189", "C-0190", - "C-0191" + "C-0191", + "C-0246" ] }, "2": { @@ -618,7 +621,7 @@ "id": "4.6", "controlsIDs": [ "C-0209", - "C-0211", + "C-0211", "C-0212" ] } @@ -632,9 +635,10 @@ "name": "Image Registry and Image Scanning", "id": "5.1", "controlsIDs": [ + "C-0078", "C-0221", - "C-0223", - "C-0078" + "C-0222", + "C-0223" ] }, "2": { @@ -673,10 +677,11 @@ "name": "Other Cluster Configurations", "id": "5.6", "controlsIDs": [ - "C-0233" + "C-0233", + "C-0242" ] } } } } -} +} \ No newline at end of file diff --git a/frameworks/cis-v1.23-t1.0.1.json b/frameworks/cis-v1.23-t1.0.1.json index b82c9ca6f..a12e7c277 100644 --- a/frameworks/cis-v1.23-t1.0.1.json +++ b/frameworks/cis-v1.23-t1.0.1.json @@ -11,7 +11,9 @@ "file" ] }, - "typeTags": ["compliance"], + "typeTags": [ + "compliance" + ], "subSections": { "1": { "id": "1", @@ -79,7 +81,6 @@ "C-0141", "C-0142", "C-0143" - ] }, "3": { @@ -92,7 +93,7 @@ "C-0147", "C-0148", "C-0149", - 
"C-0150" + "C-0150" ] }, "4": { @@ -115,7 +116,7 @@ "C-0156", "C-0157", "C-0158", - "C-0159" + "C-0159" ] }, "3": { @@ -150,7 +151,6 @@ "C-0169", "C-0170", "C-0171" - ] }, "2": { @@ -169,7 +169,7 @@ "C-0181", "C-0182", "C-0183", - "C-0184" + "C-0184" ] } } @@ -188,7 +188,7 @@ "C-0188", "C-0189", "C-0190", - "C-0191" + "C-0191" ] }, "2": { @@ -207,38 +207,38 @@ "C-0201", "C-0202", "C-0203", - "C-0204" + "C-0204" ] }, "3": { "name": "Network Policies and CNI", "id": "5.3", "controlsIDs": [ - "C-0205", - "C-0206" + "C-0205", + "C-0206" ] }, "4": { "name": "Secrets Management", "id": "5.4", "controlsIDs": [ - "C-0207", - "C-0208" + "C-0207", + "C-0208" ] }, "7": { "name": "General Policies", "id": "5.7", "controlsIDs": [ - "C-0209", + "C-0209", "C-0210", - "C-0211", - "C-0212" + "C-0211", + "C-0212" ] } } } - }, + }, "activeControls": [ { "controlID": "C-0092", @@ -1330,4 +1330,4 @@ } } ] -} +} \ No newline at end of file diff --git a/scripts/generate_subsections_ids.py b/scripts/generate_subsections_ids.py index f7e265654..c77043284 100644 --- a/scripts/generate_subsections_ids.py +++ b/scripts/generate_subsections_ids.py @@ -9,26 +9,39 @@ import json import os import re +import logging +import sys + # constants currDir = os.path.abspath(os.getcwd()) frameworks_dir = os.path.join(currDir, 'frameworks') framework_name_to_filename_mapping = {} +logging.basicConfig(level=logging.INFO) + # ================================================ def init_framework_name_to_filename_mapping(): for filename in os.listdir(frameworks_dir): + logging.info(f"Checking file: {filename}") # Load the JSON files if filename.endswith('.json'): - with open(os.path.join(frameworks_dir, filename)) as f1: - framework = json.load(f1) - framework_name_to_filename_mapping[framework['name']] = filename + logging.info(f"file {filename} detected as a JSON") + try: + with open(os.path.join(frameworks_dir, filename)) as f1: + framework = json.load(f1) + framework_name_to_filename_mapping[framework['name']] = filename + except Exception as e: + logging.error(f"Error detected with file {filename}. 
Error: {e}") + sys.exit(1) + def init_parser(): # Set up the argument parser + logging.info(f"Initializing parser") parser = argparse.ArgumentParser() parser.add_argument("--framework", "-fw", required=True, help="Name of the framework to add the control to") parser.add_argument("--firstCleanList", "-clean", required=False, help="Clean controlIds list before population") @@ -38,13 +51,16 @@ def init_parser(): def restart_controlIDs_list(framework): + logging.info(f"Restarting controls ID list") for subsection1 in framework["subSections"]: if "subSections" in framework["subSections"][subsection1]: for item in framework["subSections"][subsection1]["subSections"]: framework["subSections"][subsection1]["subSections"][item]["controlsIDs"] = [] + logging.info(f"Restarting controls ID completed") def populate_controlIds_list(framework): + logging.info(f"Populating controls ID list") for active_control in framework["activeControls"]: control_id = active_control["controlID"] cis_subsection = active_control["patch"]["name"].split(" ")[0].replace("CIS-", "") @@ -55,11 +71,12 @@ def populate_controlIds_list(framework): tmp_controlIDs.append(control_id) -def main(): - args = init_parser() - framework_name = args.framework - restart_controlIDs_lists = args.firstCleanList - +def main(framework): + # args = init_parser() + # framework_name = args.framework + framework_name = framework + # restart_controlIDs_lists = args.firstCleanList + restart_controlIDs_lists = True init_framework_name_to_filename_mapping() @@ -84,5 +101,10 @@ def main(): if __name__ == "__main__": - # TODO: add comments and python convetion for all document - main() \ No newline at end of file + logging.info("Script started") + frameworks = ["cis-aks-t1.2.0", "cis-eks-t1.2.0", "cis-v1.23-t1.0.1"] + for i in frameworks: + logging.info(f"Running on framework {i}") + main(i) + logging.info("Script ended") + sys.exit(0) \ No newline at end of file diff --git a/scripts/generate_subsections_ids.sh b/scripts/generate_subsections_ids.sh deleted file mode 100755 index 4bfc31799..000000000 --- a/scripts/generate_subsections_ids.sh +++ /dev/null @@ -1,19 +0,0 @@ - - -#!/bin/sh - -frameworksVal="cis-aks-t1.2.0 cis-eks-t1.2.0 cis-v1.23-t1.0.1" -# frameworksVal="cis-v1.23-t1.0.1" - - -for val in $frameworksVal; do - echo "Started updating framework '$val' subscections ids" - python3 scripts/generate_subsections_ids.py -fw $val -clean true - status_code=$? 
- if [ $status_code -eq 0 ] - then - echo "Completed updating framework '$val' subscections ids" - else - exit 1 - fi -done From ea06e68c1f8b8628cda4507657439555fe64d705 Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Sun, 17 Dec 2023 13:44:26 +0200 Subject: [PATCH 077/195] hot-fix-279 Signed-off-by: David Wertenteil --- rules/pv-without-encryption/rule.metadata.json | 1 + 1 file changed, 1 insertion(+) diff --git a/rules/pv-without-encryption/rule.metadata.json b/rules/pv-without-encryption/rule.metadata.json index 23453c4ca..083abd384 100644 --- a/rules/pv-without-encryption/rule.metadata.json +++ b/rules/pv-without-encryption/rule.metadata.json @@ -1,6 +1,7 @@ { "name": "pv-without-encryption", "attributes": { + "useFromKubescapeVersion": "v3.0.2" }, "ruleLanguage": "Rego", "match": [ From 56a67a850175c261e16f2e576e2763ba6377b274 Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Sun, 17 Dec 2023 13:52:10 +0200 Subject: [PATCH 078/195] update KS version Signed-off-by: David Wertenteil --- rules/pv-without-encryption/rule.metadata.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/pv-without-encryption/rule.metadata.json b/rules/pv-without-encryption/rule.metadata.json index 083abd384..883df4e5c 100644 --- a/rules/pv-without-encryption/rule.metadata.json +++ b/rules/pv-without-encryption/rule.metadata.json @@ -1,7 +1,7 @@ { "name": "pv-without-encryption", "attributes": { - "useFromKubescapeVersion": "v3.0.2" + "useFromKubescapeVersion": "v3.0.3" }, "ruleLanguage": "Rego", "match": [ From 9be5e70c176429f0804a03ba392189c6bd1aba0b Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Sun, 17 Dec 2023 16:22:13 +0200 Subject: [PATCH 079/195] removed control C-0264 from soc2 FW Signed-off-by: David Wertenteil --- frameworks/soc2.json | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/frameworks/soc2.json b/frameworks/soc2.json index 03aa66125..9ff8422c5 100644 --- a/frameworks/soc2.json +++ b/frameworks/soc2.json @@ -60,16 +60,6 @@ "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." } - }, - { - "controlID": "C-0264", - "patch": { - "name": "Data in rest encryption - Persistent Volumes are encrypted (CC1.1,CC6.7)", - "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", - "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." 
- } } - - ] } From 7da795431e745bac6df0ea19f8fc60b901fee720 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 2 Jan 2024 13:08:19 +0200 Subject: [PATCH 080/195] fix path Signed-off-by: YiscahLevySilas1 --- rules/verify-image-signature/raw.rego | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/rules/verify-image-signature/raw.rego b/rules/verify-image-signature/raw.rego index fc74eb05d..d7c5d6038 100644 --- a/rules/verify-image-signature/raw.rego +++ b/rules/verify-image-signature/raw.rego @@ -9,13 +9,14 @@ deny[msga] { verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)] count(verified_keys) == 0 + path := sprintf("spec.containers[%v].image", [i]) msga := { "alertMessage": sprintf("signature not verified for image: %v", [container.image]), "alertScore": 7, "fixPaths": [], - "reviewPaths": [container.image], - "failedPaths": [container.image], + "reviewPaths": [path], + "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [pod] @@ -32,12 +33,14 @@ deny[msga] { verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)] count(verified_keys) == 0 + path := sprintf("spec.template.spec.containers[%v].image", [i]) + msga := { "alertMessage": sprintf("signature not verified for image: %v", [container.image]), "alertScore": 7, "fixPaths": [], - "reviewPaths": [container.image], - "failedPaths": [container.image], + "reviewPaths": [path], + "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [wl] @@ -54,12 +57,14 @@ deny[msga] { verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)] count(verified_keys) == 0 + path := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].image", [i]) + msga := { "alertMessage": sprintf("signature not verified for image: %v", [container.image]), "alertScore": 7, "fixPaths": [], - "reviewPaths": [container.image], - "failedPaths": [container.image], + "reviewPaths": [path], + "failedPaths": [path], "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [wl] From 2f12c3879dc7995e693ae7639942985490f95e7e Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 2 Jan 2024 14:35:01 +0200 Subject: [PATCH 081/195] deprecate-old-rules Signed-off-by: YiscahLevySilas1 --- .../access-container-service-account/raw.rego | 333 ------------------ .../rule.metadata.json | 69 ---- rules/exec-into-container/raw.rego | 138 -------- rules/exec-into-container/rule.metadata.json | 33 -- rules/exposed-sensitive-interfaces/raw.rego | 113 ------ .../rule.metadata.json | 66 ---- rules/rule-access-dashboard/raw.rego | 119 ------- .../rule-access-dashboard/rule.metadata.json | 27 -- rules/rule-can-delete-k8s-events/raw.rego | 137 ------- .../rule.metadata.json | 33 -- .../raw.rego | 139 -------- .../rule.metadata.json | 33 -- rules/rule-can-list-get-secrets/raw.rego | 143 -------- .../rule.metadata.json | 33 -- rules/rule-can-portforward/raw.rego | 131 ------- rules/rule-can-portforward/rule.metadata.json | 32 -- rules/rule-can-ssh-to-pod/raw.rego | 104 ------ rules/rule-can-ssh-to-pod/rule.metadata.json | 52 --- rules/rule-can-update-configmap/raw.rego | 170 --------- .../rule.metadata.json | 34 -- rules/rule-excessive-delete-rights/raw.rego | 171 --------- .../rule.metadata.json | 29 -- 
rules/rule-list-all-cluster-admins/raw.rego | 132 ------- .../rule.metadata.json | 33 -- 24 files changed, 2304 deletions(-) delete mode 100644 rules/access-container-service-account/raw.rego delete mode 100644 rules/access-container-service-account/rule.metadata.json delete mode 100644 rules/exec-into-container/raw.rego delete mode 100644 rules/exec-into-container/rule.metadata.json delete mode 100644 rules/exposed-sensitive-interfaces/raw.rego delete mode 100644 rules/exposed-sensitive-interfaces/rule.metadata.json delete mode 100644 rules/rule-access-dashboard/raw.rego delete mode 100644 rules/rule-access-dashboard/rule.metadata.json delete mode 100644 rules/rule-can-delete-k8s-events/raw.rego delete mode 100644 rules/rule-can-delete-k8s-events/rule.metadata.json delete mode 100644 rules/rule-can-impersonate-users-groups/raw.rego delete mode 100644 rules/rule-can-impersonate-users-groups/rule.metadata.json delete mode 100644 rules/rule-can-list-get-secrets/raw.rego delete mode 100644 rules/rule-can-list-get-secrets/rule.metadata.json delete mode 100644 rules/rule-can-portforward/raw.rego delete mode 100644 rules/rule-can-portforward/rule.metadata.json delete mode 100644 rules/rule-can-ssh-to-pod/raw.rego delete mode 100644 rules/rule-can-ssh-to-pod/rule.metadata.json delete mode 100644 rules/rule-can-update-configmap/raw.rego delete mode 100644 rules/rule-can-update-configmap/rule.metadata.json delete mode 100644 rules/rule-excessive-delete-rights/raw.rego delete mode 100644 rules/rule-excessive-delete-rights/rule.metadata.json delete mode 100644 rules/rule-list-all-cluster-admins/raw.rego delete mode 100644 rules/rule-list-all-cluster-admins/rule.metadata.json diff --git a/rules/access-container-service-account/raw.rego b/rules/access-container-service-account/raw.rego deleted file mode 100644 index 37f5c0690..000000000 --- a/rules/access-container-service-account/raw.rego +++ /dev/null @@ -1,333 +0,0 @@ -package armo_builtins - - -# Returns for each Pod, what are the permission of its service account - -deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - pods := [pod | pod=input[_]; pod.kind =="Pod"] - pod := pods[_] - pod.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, pod) - - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "Role"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - msga := { - "alertMessage": sprintf("Pod: %v has the following permissions in the cluster: %v", [pod.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "failedPaths": [], - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [rolebinding, role, pod] - } - } -} - -# Returns for each Pod, what are the permission of its service account - deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - pods := [pod | pod=input[_]; pod.kind =="Pod"] - pod := pods[_] - pod.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, pod) - - rolebindings := [rolebinding | rolebinding = 
input[_]; rolebinding.kind == "RoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "ClusterRole"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - msga := { - "alertMessage": sprintf("Pod: %v has the following permissions in the cluster: %v", [pod.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "alertObject": { - "k8sApiObjects": [rolebinding, role, pod] - } - } -} - -# Returns for each Pod, what are the permission of its service account - - deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - pods := [pod | pod=input[_]; pod.kind =="Pod"] - pod := pods[_] - pod.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, pod) - - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "ClusterRole"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - msga := { - "alertMessage": sprintf("Pod: %v has the following permissions in the cluster: %v", [pod.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "alertObject": { - "k8sApiObjects": [rolebinding, role, pod] - } - } -} - - - - -### ---------------- ##### - - - -# Returns for each Workloads, what are the permission of its service account -deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - wl := input[_] - spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - spec_template_spec_patterns[wl.kind] - - wl.spec.template.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, wl.spec.template) - - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "Role"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - msga := { - "alertMessage": sprintf("%v: %v has the following permissions in the cluster: %v", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "alertObject": { - "k8sApiObjects": [rolebinding, role, wl] - } - } -} - - -# Returns for each Workloads, what are the permission of its service account -deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - wl := input[_] - spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - spec_template_spec_patterns[wl.kind] - - wl.spec.template.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, wl.spec.template) - - rolebindings := [rolebinding | rolebinding = 
input[_]; rolebinding.kind == "RoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "ClusterRole"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - msga := { - "alertMessage": sprintf("%v: %v has the following permissions in the cluster: %v", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "alertObject": { - "k8sApiObjects": [rolebinding, role, wl] - } - } -} - - - -# Returns for each Workloads, what are the permission of its service account -deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - wl := input[_] - spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - spec_template_spec_patterns[wl.kind] - - wl.spec.template.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, wl.spec.template) - - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "ClusterRole"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - - msga := { - "alertMessage": sprintf("%v: %v has the following permissions in the cluster: %v", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "alertObject": { - "k8sApiObjects": [rolebinding, role, wl] - } - } -} - - - - -### ---------------- ##### - - -# Returns for each Cronjob, what are the permission of its service account - -deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - wl := input[_] - wl.kind == "CronJob" - wl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template) - - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "Role"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - msga := { - "alertMessage": sprintf("Cronjob: %v has the following permissions in the cluster: %v", [wl.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "alertObject": { - "k8sApiObjects": [rolebinding, role, wl] - } - } -} - - - -# Returns for each Cronjob, what are the permission of its service account -deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - - wl := input[_] - wl.kind == "CronJob" - wl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template) - - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind 
== "RoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "ClusterRole"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - msga := { - "alertMessage": sprintf("Cronjob: %v has the following permissions in the cluster: %v", [wl.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "alertObject": { - "k8sApiObjects": [rolebinding, role, wl] - } - } -} - - -# Returns for each Cronjob, what are the permission of its service account -deny[msga] { - serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == "ServiceAccount"] - serviceaccount := serviceAccounts[_] - serviceAccountName := serviceaccount.metadata.name - - - wl := input[_] - wl.kind == "CronJob" - wl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName - - not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template) - - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - rolebinding := rolebindings[_] - rolesubject := rolebinding.subjects[_] - rolesubject.name == serviceAccountName - - roles := [role | role = input[_]; role.kind == "ClusterRole"] - role := roles[_] - role.metadata.name == rolebinding.roleRef.name - - - msga := { - "alertMessage": sprintf("Cronjob: %v has the following permissions in the cluster: %v", [wl.metadata.name, rolebinding.roleRef.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "alertObject": { - "k8sApiObjects": [rolebinding, role, wl] - } - } -} - -# =============================================================== - -isNotAutoMount(serviceaccount, pod) { - pod.spec.automountServiceAccountToken == false -} -isNotAutoMount(serviceaccount, pod) { - serviceaccount.automountServiceAccountToken == false - not pod.spec["automountServiceAccountToken"] -} - diff --git a/rules/access-container-service-account/rule.metadata.json b/rules/access-container-service-account/rule.metadata.json deleted file mode 100644 index c29218196..000000000 --- a/rules/access-container-service-account/rule.metadata.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "name": "access-container-service-account", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [ - - ], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" -} diff --git a/rules/exec-into-container/raw.rego b/rules/exec-into-container/raw.rego deleted file mode 100644 index c09d77250..000000000 --- a/rules/exec-into-container/raw.rego +++ /dev/null @@ 
-1,138 +0,0 @@ -package armo_builtins - -import data.cautils - -# input: clusterrolebindings + rolebindings -# apiversion: rbac.authorization.k8s.io/v1 -# returns subjects that can exec into container - -deny[msga] { - roles := [role | role= input[_]; role.kind == "Role"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - can_exec_to_pod_resource(rule) - can_exec_to_pod_verb(rule) - - rolebinding.roleRef.kind == "Role" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can exec into containers", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -# input: clusterrolebindings + rolebindings -# apiversion: rbac.authorization.k8s.io/v1 -# returns subjects that can exec into container - -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - can_exec_to_pod_resource(rule) - can_exec_to_pod_verb(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can exec into containers", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -# input: clusterrolebindings + rolebindings -# apiversion: rbac.authorization.k8s.io/v1 -# returns subjects that can exec into container - -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - can_exec_to_pod_resource(rule) - can_exec_to_pod_verb(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can exec into containers", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -can_exec_to_pod_verb(rule) { - cautils.list_contains(rule.verbs, "create") -} -can_exec_to_pod_verb(rule) { - cautils.list_contains(rule.verbs, "*") -} - -can_exec_to_pod_resource(rule) { - cautils.list_contains(rule.resources, "pods/exec") - -} -can_exec_to_pod_resource(rule) { - cautils.list_contains(rule.resources, "pods/*") -} -can_exec_to_pod_resource(rule) { - is_api_group(rule) - cautils.list_contains(rule.resources, "*") -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "" -} - -is_api_group(rule) { - apiGroup 
:= rule.apiGroups[_] - apiGroup == "*" -} \ No newline at end of file diff --git a/rules/exec-into-container/rule.metadata.json b/rules/exec-into-container/rule.metadata.json deleted file mode 100644 index 17e089973..000000000 --- a/rules/exec-into-container/rule.metadata.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "exec-into-container", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" -} \ No newline at end of file diff --git a/rules/exposed-sensitive-interfaces/raw.rego b/rules/exposed-sensitive-interfaces/raw.rego deleted file mode 100644 index 2dccc002d..000000000 --- a/rules/exposed-sensitive-interfaces/raw.rego +++ /dev/null @@ -1,113 +0,0 @@ -package armo_builtins - -import data.kubernetes.api.client - -# loadbalancer -deny[msga] { - service := input[_] - service.kind == "Service" - service.spec.type == "LoadBalancer" - - wl := input[_] - workload_types = {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job", "Pod", "CronJob"} - workload_types[wl.kind] - result := wl_connectedto_service(wl, service) - - # see default-config-inputs.json for list values - services_names := data.postureControlInputs.servicesNames - services_names[service.metadata.name] - # externalIP := service.spec.externalIPs[_] - externalIP := service.status.loadBalancer.ingress[0].ip - - - msga := { - "alertMessage": sprintf("service: %v is exposed", [service.metadata.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": result, - "failedPaths": result, - "alertObject": { - "k8sApiObjects": [wl, service] - } - } -} - - -# nodePort -# get a pod connected to that service, get nodeIP (hostIP?) -# use ip + nodeport -deny[msga] { - service := input[_] - service.kind == "Service" - service.spec.type == "NodePort" - - # see default-config-inputs.json for list values - services_names := data.postureControlInputs.servicesNames - services_names[service.metadata.name] - - pod := input[_] - pod.kind == "Pod" - - result := wl_connectedto_service(pod, service) - - - - msga := { - "alertMessage": sprintf("service: %v is exposed", [service.metadata.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": result, - "failedPaths": result, - "alertObject": { - "k8sApiObjects": [pod, service] - } - } -} - -# nodePort -# get a workload connected to that service, get nodeIP (hostIP?) 
-# use ip + nodeport -deny[msga] { - service := input[_] - service.kind == "Service" - service.spec.type == "NodePort" - - # see default-config-inputs.json for list values - services_names := data.postureControlInputs.servicesNames - services_names[service.metadata.name] - - wl := input[_] - spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job", "CronJob"} - spec_template_spec_patterns[wl.kind] - - result := wl_connectedto_service(wl, service) - - pods_resource := client.query_all("pods") - pod := pods_resource.body.items[_] - my_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)] - - - - msga := { - "alertMessage": sprintf("service: %v is exposed", [service.metadata.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "reviewPaths": result, - "failedPaths": result, - "alertObject": { - "k8sApiObjects": [wl, service] - } - } -} - -# ==================================================================================== - -wl_connectedto_service(wl, service) = paths{ - count({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector) - paths = ["spec.selector.matchLabels","spec.selector"] -} - -wl_connectedto_service(wl, service) = paths { - wl.spec.selector.matchLabels == service.spec.selector - paths = ["spec.selector.matchLabels", "spec.selector"] -} diff --git a/rules/exposed-sensitive-interfaces/rule.metadata.json b/rules/exposed-sensitive-interfaces/rule.metadata.json deleted file mode 100644 index 47b74abb7..000000000 --- a/rules/exposed-sensitive-interfaces/rule.metadata.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "name": "exposed-sensitive-interfaces", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.servicesNames" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.servicesNames", - "name": "Service names", - "description": "List of services relating to known software interfaces that should not generally be exposed to the Internet." 
- } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins" -} \ No newline at end of file diff --git a/rules/rule-access-dashboard/raw.rego b/rules/rule-access-dashboard/raw.rego deleted file mode 100644 index 228aaaaff..000000000 --- a/rules/rule-access-dashboard/raw.rego +++ /dev/null @@ -1,119 +0,0 @@ -package armo_builtins - -# input: roleBinding -# apiversion: v1 -# fails if a subject that is not dashboard service account is bound to dashboard role - -deny[msga] { - roleBinding := input[_] - roleBinding.kind == "RoleBinding" - roleBinding.roleRef.name == "kubernetes-dashboard" - subject := roleBinding.subjects[_] - subject.name != "kubernetes-dashboard" - subject.kind != "ServiceAccount" - - msga := { - "alertMessage": sprintf("the following subjects: %s are bound to dashboard role/clusterrole", [subject.name]), - "alertScore": 9, - "failedPaths": [], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [roleBinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -# input: clusterRoleBinding -# apiversion: v1 -# fails if a subject that is not dashboard service account is bound to dashboard role - -deny[msga] { - roleBinding := input[_] - roleBinding.kind == "ClusterRoleBinding" - roleBinding.roleRef.name == "kubernetes-dashboard" - subject := roleBinding.subjects[_] - subject.name != "kubernetes-dashboard" - subject.kind != "ServiceAccount" - - msga := { - "alertMessage": sprintf("the following subjects: %s are bound to dashboard role/clusterrole", [subject.name]), - "alertScore": 9, - "failedPaths": [], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [roleBinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -# input: -# apiversion: -# fails if pod that is not dashboard is associated to dashboard service account - -deny[msga] { - pod := input[_] - pod.spec.serviceAccountName == "kubernetes-dashboard" - not startswith(pod.metadata.name, "kubernetes-dashboard") - path := "spec.serviceAccountName" - msga := { - "alertMessage": sprintf("the following pods: %s are associated with dashboard service account", [pod.metadata.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "deletePaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [pod] - } - } -} - -# input: -# apiversion: -# fails if workload that is not dashboard is associated to dashboard service account - -deny[msga] { - wl := input[_] - spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - spec_template_spec_patterns[wl.kind] - wl.spec.template.spec.serviceAccountName == "kubernetes-dashboard" - not startswith(wl.metadata.name, "kubernetes-dashboard") - path := "spec.template.spec.serviceAccountName" - msga := { - "alertMessage": sprintf("%v: %v is associated with dashboard service account", [wl.kind, wl.metadata.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "deletePaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [wl] - } - } -} - -# input: -# apiversion: -# fails if CronJob that is not dashboard is associated to dashboard service account - -deny[msga] { - wl := input[_] - wl.kind == "CronJob" - wl.spec.jobTemplate.spec.template.spec.serviceAccountName == "kubernetes-dashboard" - not startswith(wl.metadata.name, "kubernetes-dashboard") - path := "spec.jobTemplate.spec.template.spec.serviceAccountName" - msga := { - "alertMessage": sprintf("the following cronjob: 
%s is associated with dashboard service account", [wl.metadata.name]), - "packagename": "armo_builtins", - "alertScore": 7, - "deletePaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [wl] - } - } -} \ No newline at end of file diff --git a/rules/rule-access-dashboard/rule.metadata.json b/rules/rule-access-dashboard/rule.metadata.json deleted file mode 100644 index 5378febee..000000000 --- a/rules/rule-access-dashboard/rule.metadata.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "name": "rule-access-dashboard", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" - } \ No newline at end of file diff --git a/rules/rule-can-delete-k8s-events/raw.rego b/rules/rule-can-delete-k8s-events/raw.rego deleted file mode 100644 index a586984fc..000000000 --- a/rules/rule-can-delete-k8s-events/raw.rego +++ /dev/null @@ -1,137 +0,0 @@ -package armo_builtins - -import data.cautils - -# fails if user can delete events -# RoleBinding to Role -deny [msga] { - roles := [role | role= input[_]; role.kind == "Role"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canDeleteEventsResource(rule) - canDeleteEventsVerb(rule) - - rolebinding.roleRef.kind == "Role" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can delete events", [subject.kind, subject.name]), - "alertScore": 6, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -# fails if user can delete events -# RoleBinding to ClusterRole -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canDeleteEventsResource(rule) - canDeleteEventsVerb(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can delete events", [subject.kind, subject.name]), - "alertScore": 6, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -# fails if user can delete events -# ClusterRoleBinding to ClusterRole -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - role:= roles[_] 
- clusterrolebinding := clusterrolebindings[_] - - rule:= role.rules[_] - canDeleteEventsResource(rule) - canDeleteEventsVerb(rule) - - clusterrolebinding.roleRef.kind == "ClusterRole" - clusterrolebinding.roleRef.name == role.metadata.name - - - subject := clusterrolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can delete events", [subject.kind, subject.name]), - "alertScore": 6, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,clusterrolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -canDeleteEventsResource(rule) { - cautils.list_contains(rule.resources,"events") -} -canDeleteEventsResource(rule) { - is_api_group(rule) - cautils.list_contains(rule.resources,"*") -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "*" -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "" -} - -canDeleteEventsVerb(rule) { - cautils.list_contains(rule.verbs,"delete") -} - -canDeleteEventsVerb(rule) { - cautils.list_contains(rule.verbs,"deletecollection") -} - -canDeleteEventsVerb(rule) { - cautils.list_contains(rule.verbs,"*") -} \ No newline at end of file diff --git a/rules/rule-can-delete-k8s-events/rule.metadata.json b/rules/rule-can-delete-k8s-events/rule.metadata.json deleted file mode 100644 index 797eb1ea8..000000000 --- a/rules/rule-can-delete-k8s-events/rule.metadata.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "rule-can-delete-k8s-events", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" - } \ No newline at end of file diff --git a/rules/rule-can-impersonate-users-groups/raw.rego b/rules/rule-can-impersonate-users-groups/raw.rego deleted file mode 100644 index 8fe4e0589..000000000 --- a/rules/rule-can-impersonate-users-groups/raw.rego +++ /dev/null @@ -1,139 +0,0 @@ -package armo_builtins - -import data.cautils - -deny[msga] { - roles := [role | role= input[_]; role.kind == "Role"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canImpersonateVerb(rule) - canImpersonateResource(rule) - - rolebinding.roleRef.kind == "Role" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can impersonate users", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= 
role.rules[_] - canImpersonateVerb(rule) - canImpersonateResource(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can impersonate users", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - - -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canImpersonateVerb(rule) - canImpersonateResource(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can impersonate users", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -canImpersonateVerb(rule) { - cautils.list_contains(rule.verbs, "impersonate") -} -canImpersonateVerb(rule) { - cautils.list_contains(rule.verbs, "*") -} - - -canImpersonateResource(rule) { - cautils.list_contains(rule.resources,"users") -} - -canImpersonateResource(rule) { - cautils.list_contains(rule.resources,"serviceaccounts") -} - -canImpersonateResource(rule) { - cautils.list_contains(rule.resources,"groups") -} - -canImpersonateResource(rule) { - cautils.list_contains(rule.resources,"uids") -} - -canImpersonateResource(rule) { - is_api_group(rule) - cautils.list_contains(rule.resources,"*") -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "*" -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "" -} \ No newline at end of file diff --git a/rules/rule-can-impersonate-users-groups/rule.metadata.json b/rules/rule-can-impersonate-users-groups/rule.metadata.json deleted file mode 100644 index 88ba455c0..000000000 --- a/rules/rule-can-impersonate-users-groups/rule.metadata.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "rule-can-impersonate-users-groups", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" - } \ No newline at end of file diff --git a/rules/rule-can-list-get-secrets/raw.rego b/rules/rule-can-list-get-secrets/raw.rego deleted file mode 100644 index f94f31ab1..000000000 --- a/rules/rule-can-list-get-secrets/raw.rego +++ /dev/null @@ -1,143 +0,0 @@ -package armo_builtins - -import data.cautils - -# fails if user can list/get secrets -# RoleBinding to Role -deny[msga] { - roles := [role | 
role= input[_]; role.kind == "Role"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canViewSecretsResource(rule) - canViewSecretsVerb(rule) - - rolebinding.roleRef.kind == "Role" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can read secrets", [subject.kind, subject.name]), - "alertScore": 9, - "packagename": "armo_builtins", - "deletePaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -# fails if user can list/get secrets -# RoleBinding to ClusterRole -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canViewSecretsResource(rule) - canViewSecretsVerb(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can read secrets", [subject.kind, subject.name]), - "alertScore": 9, - "packagename": "armo_builtins", - "deletePaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -# fails if user can list/get secrets -# ClusterRoleBinding to ClusterRole -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - role:= roles[_] - clusterrolebinding := clusterrolebindings[_] - - rule:= role.rules[_] - canViewSecretsResource(rule) - canViewSecretsVerb(rule) - - clusterrolebinding.roleRef.kind == "ClusterRole" - clusterrolebinding.roleRef.name == role.metadata.name - - subject := clusterrolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can read secrets", [subject.kind, subject.name]), - "alertScore": 9, - "packagename": "armo_builtins", - "deletePaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [role,clusterrolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - - - -canViewSecretsVerb(rule) { - cautils.list_contains(rule.verbs,"get") -} - -canViewSecretsVerb(rule) { - cautils.list_contains(rule.verbs,"list") -} - -canViewSecretsVerb(rule) { - cautils.list_contains(rule.verbs,"watch") -} - - -canViewSecretsVerb(rule) { - cautils.list_contains(rule.verbs,"*") -} - - -canViewSecretsResource(rule) { - cautils.list_contains(rule.resources,"secrets") -} - -canViewSecretsResource(rule) { - is_api_group(rule) - cautils.list_contains(rule.resources,"*") -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "*" -} -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "" -} \ No newline at end of file diff --git a/rules/rule-can-list-get-secrets/rule.metadata.json b/rules/rule-can-list-get-secrets/rule.metadata.json deleted file mode 100644 index a6824d60f..000000000 --- a/rules/rule-can-list-get-secrets/rule.metadata.json +++ 
/dev/null @@ -1,33 +0,0 @@ -{ - "name": "rule-can-list-get-secrets", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" - } \ No newline at end of file diff --git a/rules/rule-can-portforward/raw.rego b/rules/rule-can-portforward/raw.rego deleted file mode 100644 index b5fa53bf3..000000000 --- a/rules/rule-can-portforward/raw.rego +++ /dev/null @@ -1,131 +0,0 @@ -package armo_builtins - -import data.cautils - -deny[msga] { - roles := [role | role= input[_]; role.kind == "Role"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canForwardToPodResource(rule) - canForwardToPodVerb(rule) - - rolebinding.roleRef.kind == "Role" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can do port forwarding", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canForwardToPodResource(rule) - canForwardToPodVerb(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can do port forwarding", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - - -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canForwardToPodResource(rule) - canForwardToPodVerb(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("the following %v: %v, can do port forwarding", [subject.kind, subject.name]), - "alertScore": 9, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role, rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -canForwardToPodVerb(rule) { - cautils.list_contains(rule.verbs, "create") -} - -canForwardToPodVerb(rule) { 
- cautils.list_contains(rule.verbs, "get") -} -canForwardToPodVerb(rule) { - cautils.list_contains(rule.verbs, "*") -} - -canForwardToPodResource(rule) { - cautils.list_contains(rule.resources,"pods/portforward") -} -canForwardToPodResource(rule) { - cautils.list_contains(rule.resources,"pods/*") -} -canForwardToPodResource(rule) { - is_api_group(rule) - cautils.list_contains(rule.resources,"*") -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "" -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "*" -} diff --git a/rules/rule-can-portforward/rule.metadata.json b/rules/rule-can-portforward/rule.metadata.json deleted file mode 100644 index 6fe58a54e..000000000 --- a/rules/rule-can-portforward/rule.metadata.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "name": "rule-can-portforward", - "attributes": { - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" - } \ No newline at end of file diff --git a/rules/rule-can-ssh-to-pod/raw.rego b/rules/rule-can-ssh-to-pod/raw.rego deleted file mode 100644 index 34699995d..000000000 --- a/rules/rule-can-ssh-to-pod/raw.rego +++ /dev/null @@ -1,104 +0,0 @@ -package armo_builtins - -# input: pod -# apiversion: v1 -# does: returns the external facing services of that pod - -deny[msga] { - pod := input[_] - pod.kind == "Pod" - podns := pod.metadata.namespace - podname := pod.metadata.name - labels := pod.metadata.labels - filtered_labels := json.remove(labels, ["pod-template-hash"]) - path := "metadata.labels" - service := input[_] - service.kind == "Service" - service.metadata.namespace == podns - service.spec.selector == filtered_labels - - hasSSHPorts(service) - - msga := { - "alertMessage": sprintf("pod %v/%v exposed by SSH services: %v", [podns, podname, service]), - "packagename": "armo_builtins", - "alertScore": 7, - "deletePaths": [path], - "failedPaths": [path], - "fixPaths": [], - "alertObject": { - "k8sApiObjects": [pod,service] - } - } -} - -deny[msga] { - wl := input[_] - spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - spec_template_spec_patterns[wl.kind] - labels := wl.spec.template.metadata.labels - path := "spec.template.metadata.labels" - service := input[_] - service.kind == "Service" - service.metadata.namespace == wl.metadata.namespace - service.spec.selector == labels - - hasSSHPorts(service) - - msga := { - "alertMessage": sprintf("%v: %v is exposed by SSH services: %v", [wl.kind, wl.metadata.name, service]), - "packagename": "armo_builtins", - "alertScore": 7, - "deletePaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [wl,service] - } - } -} - -deny[msga] { - wl := input[_] - wl.kind == "CronJob" - labels := wl.spec.jobTemplate.spec.template.metadata.labels - path := "spec.jobTemplate.spec.template.metadata.labels" - service := input[_] - service.kind == "Service" - service.metadata.namespace == wl.metadata.namespace - service.spec.selector == labels - - hasSSHPorts(service) - - msga := { - "alertMessage": sprintf("%v: %v is exposed by SSH services: %v", [wl.kind, wl.metadata.name, service]), - "packagename": "armo_builtins", - "alertScore": 7, - 
"deletePaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [wl,service] - } - } -} - -hasSSHPorts(service) { - port := service.spec.ports[_] - port.port == 22 -} - - -hasSSHPorts(service) { - port := service.spec.ports[_] - port.port == 2222 -} - -hasSSHPorts(service) { - port := service.spec.ports[_] - port.targetPort == 22 -} - - -hasSSHPorts(service) { - port := service.spec.ports[_] - port.targetPort == 2222 -} diff --git a/rules/rule-can-ssh-to-pod/rule.metadata.json b/rules/rule-can-ssh-to-pod/rule.metadata.json deleted file mode 100644 index 5d2cc8311..000000000 --- a/rules/rule-can-ssh-to-pod/rule.metadata.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "rule-can-ssh-to-pod", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins" -} \ No newline at end of file diff --git a/rules/rule-can-update-configmap/raw.rego b/rules/rule-can-update-configmap/raw.rego deleted file mode 100644 index 305e000d0..000000000 --- a/rules/rule-can-update-configmap/raw.rego +++ /dev/null @@ -1,170 +0,0 @@ -package armo_builtins - -import data.cautils - -# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns) -# RoleBinding to Role -deny [msga] { - configmaps := [configmap | configmap = input[_]; configmap.kind == "ConfigMap"] - configmap := configmaps[_] - configmap.metadata.name == "coredns" - - roles := [role | role= input[_]; role.kind == "Role"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - - canModifyConfigMapResource(rule) - canModifyConfigMapVerb(rule) - - rolebinding.roleRef.kind == "Role" - rolebinding.roleRef.name == role.metadata.name - rolebinding.metadata.namespace == "kube-system" - - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can modify 'coredns' configmap", [subject.kind, subject.name]), - "alertScore": 6, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns) -# RoleBinding to ClusterRole -deny[msga] { - configmaps := [configmap | configmap = input[_]; configmap.kind == "ConfigMap"] - configmap := configmaps[_] - configmap.metadata.name == "coredns" - - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canModifyConfigMapResource(rule) - canModifyConfigMapVerb(rule) - - rolebinding.roleRef.kind == 
"ClusterRole" - rolebinding.roleRef.name == role.metadata.name - rolebinding.metadata.namespace == "kube-system" - - - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can modify 'coredns' configmap", [subject.kind, subject.name]), - "alertScore": 6, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } - -} - - -# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns) -# ClusterRoleBinding to ClusterRole -deny[msga] { - configmaps := [configmap | configmap = input[_]; configmap.kind == "ConfigMap"] - configmap := configmaps[_] - configmap.metadata.name == "coredns" - - roles := [role | role= input[_]; role.kind == "ClusterRole"] - clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - role:= roles[_] - clusterrolebinding := clusterrolebindings[_] - - rule:= role.rules[_] - canModifyConfigMapResource(rule) - canModifyConfigMapVerb(rule) - - - clusterrolebinding.roleRef.kind == "ClusterRole" - clusterrolebinding.roleRef.name == role.metadata.name - - - - subject := clusterrolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can modify 'coredns' configmap", [subject.kind, subject.name]), - "alertScore": 6, - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,clusterrolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - - - - - canModifyConfigMapResource(rule) { - not rule.resourceNames - cautils.list_contains(rule.resources,"configmaps") - } - - canModifyConfigMapResource(rule) { - not rule.resourceNames - is_api_group(rule) - cautils.list_contains(rule.resources,"*") - } - - canModifyConfigMapResource(rule) { - cautils.list_contains(rule.resources,"configmaps") - cautils.list_contains(rule.resourceNames,"coredns") - } - - canModifyConfigMapVerb(rule) { - cautils.list_contains(rule.verbs,"update") - } - - - canModifyConfigMapVerb(rule) { - cautils.list_contains(rule.verbs,"patch") - } - - canModifyConfigMapVerb(rule) { - cautils.list_contains(rule.verbs,"*") - } - - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "*" -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "" -} \ No newline at end of file diff --git a/rules/rule-can-update-configmap/rule.metadata.json b/rules/rule-can-update-configmap/rule.metadata.json deleted file mode 100644 index bfd5344e4..000000000 --- a/rules/rule-can-update-configmap/rule.metadata.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "name": "rule-can-update-configmap", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding", - "ConfigMap" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" - } \ No newline at end of file diff --git 
a/rules/rule-excessive-delete-rights/raw.rego b/rules/rule-excessive-delete-rights/raw.rego deleted file mode 100644 index a5560634f..000000000 --- a/rules/rule-excessive-delete-rights/raw.rego +++ /dev/null @@ -1,171 +0,0 @@ -package armo_builtins - -import data.cautils - -# fails if user can can delete important resources -# RoleBinding to Role -deny[msga] { - roles := [role | role= input[_]; role.kind == "Role"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canDeleteResource(rule) - canDeleteVerb(rule) - - rolebinding.roleRef.kind == "Role" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can delete important resources", [subject.kind, subject.name]), - "alertScore": 9, - "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -# fails if user can can delete important resources -# RoleBinding to ClusterRole -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[_] - canDeleteResource(rule) - canDeleteVerb(rule) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can delete important resources", [subject.kind, subject.name]), - "alertScore": 9, - "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -# fails if user can can delete important resources -# ClusterRoleBinding to ClusterRole -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - role:= roles[_] - clusterrolebinding := clusterrolebindings[_] - - rule:= role.rules[_] - canDeleteResource(rule) - canDeleteVerb(rule) - - clusterrolebinding.roleRef.kind == "ClusterRole" - clusterrolebinding.roleRef.name == role.metadata.name - - - subject := clusterrolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v can delete important resources", [subject.kind, subject.name]), - "alertScore": 9, - "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,clusterrolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -canDeleteVerb(rule) { - cautils.list_contains(rule.verbs, "delete") -} - -canDeleteVerb(rule) { - cautils.list_contains(rule.verbs, "deletecollection") -} - -canDeleteVerb(rule) { - cautils.list_contains(rule.verbs, "*") -} - -canDeleteResource(rule) { - cautils.list_contains(rule.resources, "secrets") -} -canDeleteResource(rule) { - cautils.list_contains(rule.resources, "pods") -} -canDeleteResource(rule) { - 
cautils.list_contains(rule.resources, "services") -} -canDeleteResource(rule) { - cautils.list_contains(rule.resources, "deployments") -} -canDeleteResource(rule) { - cautils.list_contains(rule.resources, "replicasets") -} -canDeleteResource(rule) { - cautils.list_contains(rule.resources, "daemonsets") -} -canDeleteResource(rule) { - cautils.list_contains(rule.resources, "statefulsets") -} -canDeleteResource(rule) { - cautils.list_contains(rule.resources, "jobs") -} -canDeleteResource(rule) { - cautils.list_contains(rule.resources, "cronjobs") -} -canDeleteResource(rule) { - is_api_group(rule) - cautils.list_contains(rule.resources, "*") -} - - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "" -} -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "*" -} -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "apps" -} -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "batch" -} - diff --git a/rules/rule-excessive-delete-rights/rule.metadata.json b/rules/rule-excessive-delete-rights/rule.metadata.json deleted file mode 100644 index dc33d12d8..000000000 --- a/rules/rule-excessive-delete-rights/rule.metadata.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "name": "rule-excessive-delete-rights", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" - } \ No newline at end of file diff --git a/rules/rule-list-all-cluster-admins/raw.rego b/rules/rule-list-all-cluster-admins/raw.rego deleted file mode 100644 index fffa8bfa8..000000000 --- a/rules/rule-list-all-cluster-admins/raw.rego +++ /dev/null @@ -1,132 +0,0 @@ -package armo_builtins - -import data.cautils - -# input: roles -# apiversion: v1 -# does: returns roles+ related subjects in rolebinding - -deny[msga] { - roles := [role | role= input[_]; role.kind == "Role"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[i] - canCreate(rule, i) - canCreateResources(rule, i) - - rolebinding.roleRef.kind == "Role" - rolebinding.roleRef.name == role.metadata.name - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v have high privileges, such as cluster-admin", [subject.kind, subject.name]), - "alertScore": 9, - "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -# input: ClusterRole -# apiversion: v1 -# does: returns clusterroles+ related subjects in rolebinding - -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "RoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[i] - canCreate(rule, i) - canCreateResources(rule, i) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := 
rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v have high privileges, such as cluster-admin", [subject.kind, subject.name]), - "alertScore": 9, - "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - -# input: ClusterRole -# apiversion: v1 -# does: returns clusterroles+ related subjects in clusterrolebinding - -deny[msga] { - roles := [role | role= input[_]; role.kind == "ClusterRole"] - rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == "ClusterRoleBinding"] - role:= roles[_] - rolebinding := rolebindings[_] - - rule:= role.rules[i] - canCreate(rule, i) - canCreateResources(rule, i) - - rolebinding.roleRef.kind == "ClusterRole" - rolebinding.roleRef.name == role.metadata.name - - subject := rolebinding.subjects[i] - path := sprintf("subjects[%v]", [format_int(i, 10)]) - - msga := { - "alertMessage": sprintf("The following %v: %v have high privileges, such as cluster-admin", [subject.kind, subject.name]), - "alertScore": 9, - "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], - "packagename": "armo_builtins", - "alertObject": { - "k8sApiObjects": [role,rolebinding], - "externalObjects": { - "subject" : [subject] - } - } - } -} - - -canCreate(rule, i) { - verb := rule.verbs[j] - verb == "*" -} - -canCreateResources(rule, i){ - is_api_group(rule) - resource := rule.resources[j] - resource == "*" -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "" -} - -is_api_group(rule) { - apiGroup := rule.apiGroups[_] - apiGroup == "*" -} diff --git a/rules/rule-list-all-cluster-admins/rule.metadata.json b/rules/rule-list-all-cluster-admins/rule.metadata.json deleted file mode 100644 index 3cf574f7e..000000000 --- a/rules/rule-list-all-cluster-admins/rule.metadata.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "rule-list-all-cluster-admins", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects" -} \ No newline at end of file From 27745cc366cbc1db9ff5236f896e285de07f129b Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 2 Jan 2024 14:48:58 +0200 Subject: [PATCH 082/195] deprecate-old-rules Signed-off-by: YiscahLevySilas1 --- controls/C-0002-execintocontainer.json | 1 - controls/C-0007-datadestruction.json | 1 - controls/C-0014-accesskubernetesdashboard.json | 1 - controls/C-0015-listkubernetessecrets.json | 1 - controls/C-0021-exposedsensitiveinterfaces.json | 1 - controls/C-0031-deletekubernetesevents.json | 1 - controls/C-0035-clusteradminbinding.json | 1 - controls/C-0037-corednspoisoning.json | 1 - controls/C-0042-sshserverrunninginsidecontainer.json | 1 - controls/C-0053-accesscontainerserviceaccount.json | 1 - controls/C-0063-portforwardingprivileges.json | 1 - controls/C-0065-noimpersonation.json | 1 - 12 files changed, 12 deletions(-) diff --git 
a/controls/C-0002-execintocontainer.json b/controls/C-0002-execintocontainer.json index 9c0902c31..39c9b93f3 100644 --- a/controls/C-0002-execintocontainer.json +++ b/controls/C-0002-execintocontainer.json @@ -13,7 +13,6 @@ "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", "rulesNames": [ - "exec-into-container", "exec-into-container-v1" ], "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", diff --git a/controls/C-0007-datadestruction.json b/controls/C-0007-datadestruction.json index 984200f71..ae5fb367f 100644 --- a/controls/C-0007-datadestruction.json +++ b/controls/C-0007-datadestruction.json @@ -12,7 +12,6 @@ "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", "rulesNames": [ - "rule-excessive-delete-rights", "rule-excessive-delete-rights-v1" ], "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", diff --git a/controls/C-0014-accesskubernetesdashboard.json b/controls/C-0014-accesskubernetesdashboard.json index 05740d557..84b01cfa3 100644 --- a/controls/C-0014-accesskubernetesdashboard.json +++ b/controls/C-0014-accesskubernetesdashboard.json @@ -13,7 +13,6 @@ "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", "rulesNames": [ - "rule-access-dashboard", "rule-access-dashboard-subject-v1", "rule-access-dashboard-wl-v1" ], diff --git a/controls/C-0015-listkubernetessecrets.json b/controls/C-0015-listkubernetessecrets.json index 1f0203802..6474e7b85 100644 --- a/controls/C-0015-listkubernetessecrets.json +++ b/controls/C-0015-listkubernetessecrets.json @@ -13,7 +13,6 @@ "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. 
Use exception mechanism to prevent repetitive the notifications.", "rulesNames": [ - "rule-can-list-get-secrets", "rule-can-list-get-secrets-v1" ], "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", diff --git a/controls/C-0021-exposedsensitiveinterfaces.json b/controls/C-0021-exposedsensitiveinterfaces.json index d606eee15..8d51908b0 100644 --- a/controls/C-0021-exposedsensitiveinterfaces.json +++ b/controls/C-0021-exposedsensitiveinterfaces.json @@ -12,7 +12,6 @@ "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", "rulesNames": [ - "exposed-sensitive-interfaces", "exposed-sensitive-interfaces-v1" ], "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", diff --git a/controls/C-0031-deletekubernetesevents.json b/controls/C-0031-deletekubernetesevents.json index f862b18fd..7f76c856e 100644 --- a/controls/C-0031-deletekubernetesevents.json +++ b/controls/C-0031-deletekubernetesevents.json @@ -13,7 +13,6 @@ "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", "rulesNames": [ - "rule-can-delete-k8s-events", "rule-can-delete-k8s-events-v1" ], "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", diff --git a/controls/C-0035-clusteradminbinding.json b/controls/C-0035-clusteradminbinding.json index 8e6cebce6..ef6cf7da4 100644 --- a/controls/C-0035-clusteradminbinding.json +++ b/controls/C-0035-clusteradminbinding.json @@ -13,7 +13,6 @@ "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. 
This control determines which subjects have cluster admin permissions.", "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", "rulesNames": [ - "rule-list-all-cluster-admins", "rule-list-all-cluster-admins-v1" ], "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", diff --git a/controls/C-0037-corednspoisoning.json b/controls/C-0037-corednspoisoning.json index 3eb69d04b..2ff4a9ce1 100644 --- a/controls/C-0037-corednspoisoning.json +++ b/controls/C-0037-corednspoisoning.json @@ -11,7 +11,6 @@ "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", "rulesNames": [ - "rule-can-update-configmap", "rule-can-update-configmap-v1" ], "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", diff --git a/controls/C-0042-sshserverrunninginsidecontainer.json b/controls/C-0042-sshserverrunninginsidecontainer.json index 2163c6961..44d723272 100644 --- a/controls/C-0042-sshserverrunninginsidecontainer.json +++ b/controls/C-0042-sshserverrunninginsidecontainer.json @@ -11,7 +11,6 @@ "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", "rulesNames": [ - "rule-can-ssh-to-pod", "rule-can-ssh-to-pod-v1" ], "long_description": "SSH server that is running inside a container may be used by attackers. 
If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", diff --git a/controls/C-0053-accesscontainerserviceaccount.json b/controls/C-0053-accesscontainerserviceaccount.json index 4586605fa..a3a503532 100644 --- a/controls/C-0053-accesscontainerserviceaccount.json +++ b/controls/C-0053-accesscontainerserviceaccount.json @@ -13,7 +13,6 @@ "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", "rulesNames": [ - "access-container-service-account", "access-container-service-account-v1" ], "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", diff --git a/controls/C-0063-portforwardingprivileges.json b/controls/C-0063-portforwardingprivileges.json index 905759773..dc93dc1aa 100644 --- a/controls/C-0063-portforwardingprivileges.json +++ b/controls/C-0063-portforwardingprivileges.json @@ -10,7 +10,6 @@ "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", "rulesNames": [ - "rule-can-portforward", "rule-can-portforward-v1" ], "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", diff --git a/controls/C-0065-noimpersonation.json b/controls/C-0065-noimpersonation.json index e1e872cf8..efa09b17c 100644 --- a/controls/C-0065-noimpersonation.json +++ b/controls/C-0065-noimpersonation.json @@ -12,7 +12,6 @@ "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. 
This control identifies all subjects whose roles include impersonate verb.", "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", "rulesNames": [ - "rule-can-impersonate-users-groups", "rule-can-impersonate-users-groups-v1" ], "controlID": "C-0065", From 10308ca324dca001d68ad41b711bb3d7fac1b14e Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 14 Jan 2024 10:59:22 +0200 Subject: [PATCH 083/195] delete AllowedValues list Signed-off-by: YiscahLevySilas1 --- default-config-inputs.json | 1 - rules/rule-credentials-configmap/raw.rego | 13 ------------- .../rule.metadata.json | 8 +------- rules/rule-credentials-in-env-var/raw.rego | 17 ----------------- .../rule.metadata.json | 8 +------- 5 files changed, 2 insertions(+), 45 deletions(-) diff --git a/default-config-inputs.json b/default-config-inputs.json index b2a748767..043cee3e0 100644 --- a/default-config-inputs.json +++ b/default-config-inputs.json @@ -49,7 +49,6 @@ ], "max_critical_vulnerabilities": ["5"], "max_high_vulnerabilities": ["10"], - "sensitiveValuesAllowed": ["AllowedValue"], "sensitiveKeyNames": [ "aws_access_key_id", "aws_secret_access_key", diff --git a/rules/rule-credentials-configmap/raw.rego b/rules/rule-credentials-configmap/raw.rego index ae664b3ae..8507cfb6b 100644 --- a/rules/rule-credentials-configmap/raw.rego +++ b/rules/rule-credentials-configmap/raw.rego @@ -11,8 +11,6 @@ deny[msga] { map_secret != "" contains(lower(map_key), lower(key_name)) - # check that value wasn't allowed by user - not is_allowed_value(map_secret) path := sprintf("data[%v]", [map_key]) @@ -41,8 +39,6 @@ deny[msga] { map_secret != "" regex.match(value , map_secret) - # check that value wasn't allowed by user - not is_allowed_value(map_secret) path := sprintf("data[%v]", [map_key]) @@ -72,9 +68,6 @@ deny[msga] { decoded_secret := base64.decode(map_secret) - # check that value wasn't allowed by user - not is_allowed_value(map_secret) - regex.match(value , decoded_secret) path := sprintf("data[%v]", [map_key]) @@ -91,9 +84,3 @@ deny[msga] { } } } - - -is_allowed_value(value) { - allow_val := data.postureControlInputs.sensitiveValuesAllowed[_] - value == allow_val -} \ No newline at end of file diff --git a/rules/rule-credentials-configmap/rule.metadata.json b/rules/rule-credentials-configmap/rule.metadata.json index 8091db54e..cc08d8224 100644 --- a/rules/rule-credentials-configmap/rule.metadata.json +++ b/rules/rule-credentials-configmap/rule.metadata.json @@ -20,8 +20,7 @@ "ruleDependencies": [], "configInputs": [ "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" + "settings.postureControlInputs.sensitiveKeyNames" ], "controlConfigInputs": [ { @@ -33,11 +32,6 @@ "path": "settings.postureControlInputs.sensitiveKeyNames", "name": "Keys", "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Explicitly allowed values, which will override sensitiveValues." 
} ], "description": "fails if ConfigMaps have sensitive information in configuration", diff --git a/rules/rule-credentials-in-env-var/raw.rego b/rules/rule-credentials-in-env-var/raw.rego index e81f1aefc..dcda9b7dc 100644 --- a/rules/rule-credentials-in-env-var/raw.rego +++ b/rules/rule-credentials-in-env-var/raw.rego @@ -11,8 +11,6 @@ contains(lower(env.name), lower(key_name)) env.value != "" - # check that value wasn't allowed by user - not is_allowed_value(env.value) is_not_reference(env) @@ -44,8 +42,6 @@ contains(lower(env.name), lower(key_name)) env.value != "" - # check that value wasn't allowed by user - not is_allowed_value(env.value) is_not_reference(env) @@ -76,8 +72,6 @@ contains(lower(env.name), lower(key_name)) env.value != "" - # check that value wasn't allowed by user - not is_allowed_value(env.value) is_not_reference(env) @@ -106,8 +100,6 @@ deny[msga] { container := pod.spec.containers[i] env := container.env[j] - # check that value wasn't allowed by user - not is_allowed_value(env.value) contains(lower(env.value), lower(value)) is_not_reference(env) @@ -138,9 +130,7 @@ deny[msga] { container := wl.spec.template.spec.containers[i] env := container.env[j] - not is_allowed_value(env.value) contains(lower(env.value), lower(value)) - # check that value wasn't allowed by user is_not_reference(env) @@ -168,8 +158,6 @@ deny[msga] { container := wl.spec.jobTemplate.spec.template.spec.containers[i] env := container.env[j] - # check that value wasn't allowed by user - not is_allowed_value(env.value) contains(lower(env.value), lower(value)) is_not_reference(env) @@ -195,8 +183,3 @@ is_not_reference(env) not env.valueFrom.secretKeyRef not env.valueFrom.configMapKeyRef } - -is_allowed_value(value) { - allow_val := data.postureControlInputs.sensitiveValuesAllowed[_] - value == allow_val -} \ No newline at end of file diff --git a/rules/rule-credentials-in-env-var/rule.metadata.json b/rules/rule-credentials-in-env-var/rule.metadata.json index 8a52cefa1..9859e5c1b 100644 --- a/rules/rule-credentials-in-env-var/rule.metadata.json +++ b/rules/rule-credentials-in-env-var/rule.metadata.json @@ -46,8 +46,7 @@ "ruleDependencies": [], "configInputs": [ "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" + "settings.postureControlInputs.sensitiveKeyNames" ], "controlConfigInputs": [ { @@ -59,11 +58,6 @@ "path": "settings.postureControlInputs.sensitiveKeyNames", "name": "Keys", "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Explicitly allowed values, which will override sensitiveValues." 
} ], "description": "fails if Pods have sensitive information in configuration", From 7b4f04f3bb62bb2d075e5a3db9fbd92675928f36 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 14 Jan 2024 12:05:04 +0200 Subject: [PATCH 084/195] add path to value in deletePaths Signed-off-by: YiscahLevySilas1 --- rules/rule-credentials-in-env-var/raw.rego | 42 +++++++++++-------- .../test/cronjob/expected.json | 6 ++- .../test/deployment/expected.json | 6 ++- .../test/pod/expected.json | 6 ++- .../test/workloads/expected.json | 6 ++- 5 files changed, 40 insertions(+), 26 deletions(-) diff --git a/rules/rule-credentials-in-env-var/raw.rego b/rules/rule-credentials-in-env-var/raw.rego index dcda9b7dc..88c6407fa 100644 --- a/rules/rule-credentials-in-env-var/raw.rego +++ b/rules/rule-credentials-in-env-var/raw.rego @@ -14,14 +14,15 @@ is_not_reference(env) - path := sprintf("spec.containers[%v].env[%v].name", [format_int(i, 10), format_int(j, 10)]) + paths := [sprintf("spec.containers[%v].env[%v].name", [i, j]), + sprintf("spec.containers[%v].env[%v].value", [i, j])] msga := { "alertMessage": sprintf("Pod: %v has sensitive information in environment variables", [pod.metadata.name]), "alertScore": 9, "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], + "deletePaths": paths, + "failedPaths": paths, "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [pod] @@ -45,14 +46,15 @@ is_not_reference(env) - path := sprintf("spec.template.spec.containers[%v].env[%v].name", [format_int(i, 10), format_int(j, 10)]) + paths := [sprintf("spec.template.spec.containers[%v].env[%v].name", [i, j]), + sprintf("spec.template.spec.containers[%v].env[%v].value", [i, j])] msga := { "alertMessage": sprintf("%v: %v has sensitive information in environment variables", [wl.kind, wl.metadata.name]), "alertScore": 9, "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], + "deletePaths": paths, + "failedPaths": paths, "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [wl] @@ -75,14 +77,15 @@ is_not_reference(env) - path := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name", [format_int(i, 10), format_int(j, 10)]) + paths := [sprintf("spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name", [i, j]), + sprintf("spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value", [i, j])] msga := { "alertMessage": sprintf("Cronjob: %v has sensitive information in environment variables", [wl.metadata.name]), "alertScore": 9, "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], + "deletePaths": paths, + "failedPaths": paths, "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [wl] @@ -104,14 +107,15 @@ deny[msga] { is_not_reference(env) - path := sprintf("spec.containers[%v].env[%v].name", [format_int(i, 10), format_int(j, 10)]) + paths := [sprintf("spec.containers[%v].env[%v].name", [i, j]), + sprintf("spec.containers[%v].env[%v].value", [i, j])] msga := { "alertMessage": sprintf("Pod: %v has sensitive information in environment variables", [pod.metadata.name]), "alertScore": 9, "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], + "deletePaths": paths, + "failedPaths": paths, "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [pod] @@ -134,14 +138,15 @@ deny[msga] { is_not_reference(env) - path := sprintf("spec.template.spec.containers[%v].env[%v].name", [format_int(i, 10), format_int(j, 10)]) + paths := [sprintf("spec.template.spec.containers[%v].env[%v].name", [i, j]), + 
sprintf("spec.template.spec.containers[%v].env[%v].value", [i, j])] msga := { "alertMessage": sprintf("%v: %v has sensitive information in environment variables", [wl.kind, wl.metadata.name]), "alertScore": 9, "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], + "deletePaths": paths, + "failedPaths": paths, "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [wl] @@ -162,14 +167,15 @@ deny[msga] { is_not_reference(env) - path := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name", [format_int(i, 10), format_int(j, 10)]) + paths := [sprintf("spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name", [i, j]), + sprintf("spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value", [i, j])] msga := { "alertMessage": sprintf("Cronjob: %v has sensitive information in environment variables", [wl.metadata.name]), "alertScore": 9, "fixPaths": [], - "deletePaths": [path], - "failedPaths": [path], + "deletePaths": paths, + "failedPaths": paths, "packagename": "armo_builtins", "alertObject": { "k8sApiObjects": [wl] diff --git a/rules/rule-credentials-in-env-var/test/cronjob/expected.json b/rules/rule-credentials-in-env-var/test/cronjob/expected.json index 91409049e..375d3fcdf 100644 --- a/rules/rule-credentials-in-env-var/test/cronjob/expected.json +++ b/rules/rule-credentials-in-env-var/test/cronjob/expected.json @@ -2,10 +2,12 @@ { "alertMessage": "Cronjob: hello has sensitive information in environment variables", "deletePaths": [ - "spec.jobTemplate.spec.template.spec.containers[0].env[0].name" + "spec.jobTemplate.spec.template.spec.containers[0].env[0].name", + "spec.jobTemplate.spec.template.spec.containers[0].env[0].value" ], "failedPaths": [ - "spec.jobTemplate.spec.template.spec.containers[0].env[0].name" + "spec.jobTemplate.spec.template.spec.containers[0].env[0].name", + "spec.jobTemplate.spec.template.spec.containers[0].env[0].value" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-credentials-in-env-var/test/deployment/expected.json b/rules/rule-credentials-in-env-var/test/deployment/expected.json index ede9acff6..758a45c14 100644 --- a/rules/rule-credentials-in-env-var/test/deployment/expected.json +++ b/rules/rule-credentials-in-env-var/test/deployment/expected.json @@ -2,10 +2,12 @@ { "alertMessage": "Deployment: test2 has sensitive information in environment variables", "deletePaths": [ - "spec.template.spec.containers[1].env[1].name" + "spec.template.spec.containers[1].env[1].name", + "spec.template.spec.containers[1].env[1].value" ], "failedPaths": [ - "spec.template.spec.containers[1].env[1].name" + "spec.template.spec.containers[1].env[1].name", + "spec.template.spec.containers[1].env[1].value" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-credentials-in-env-var/test/pod/expected.json b/rules/rule-credentials-in-env-var/test/pod/expected.json index 0f1ab8d87..5e40ddd37 100644 --- a/rules/rule-credentials-in-env-var/test/pod/expected.json +++ b/rules/rule-credentials-in-env-var/test/pod/expected.json @@ -2,10 +2,12 @@ { "alertMessage": "Pod: audit-pod has sensitive information in environment variables", "deletePaths": [ - "spec.containers[0].env[1].name" + "spec.containers[0].env[1].name", + "spec.containers[0].env[1].value" ], "failedPaths": [ - "spec.containers[0].env[1].name" + "spec.containers[0].env[1].name", + "spec.containers[0].env[1].value" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/rule-credentials-in-env-var/test/workloads/expected.json 
b/rules/rule-credentials-in-env-var/test/workloads/expected.json index d109996e8..38408615a 100644 --- a/rules/rule-credentials-in-env-var/test/workloads/expected.json +++ b/rules/rule-credentials-in-env-var/test/workloads/expected.json @@ -2,10 +2,12 @@ { "alertMessage": "Deployment: test2 has sensitive information in environment variables", "deletePaths": [ - "spec.template.spec.containers[1].env[0].name" + "spec.template.spec.containers[1].env[0].name", + "spec.template.spec.containers[1].env[0].value" ], "failedPaths": [ - "spec.template.spec.containers[1].env[0].name" + "spec.template.spec.containers[1].env[0].name", + "spec.template.spec.containers[1].env[0].value" ], "fixPaths": [], "ruleStatus": "", From e95f84d1fddded177812ff674637267b250b0191 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 18 Jan 2024 14:56:17 +0200 Subject: [PATCH 085/195] refactor non-root-containers Signed-off-by: YiscahLevySilas1 --- controls/C-0013-nonrootcontainers.json | 6 +- ...ecuritycontexttoyourpodsandcontainers.json | 3 +- controls/examples/c013.yaml | 2 +- rules/non-root-containers/raw.rego | 131 ++++++++---------- rules/non-root-containers/rule.metadata.json | 2 +- .../expected.json | 22 ++- .../input/cronjob.yaml | 4 + .../test/cronjob/expected.json | 15 +- .../test/cronjob/input/cronjob.yaml | 2 + .../test/deployment-fail/expected.json | 48 +++++++ .../deployment-fail/input/deployment.yaml | 32 +++++ .../test/deployment-fixed-path/expected.json | 24 ---- .../deployment-fixed-path/input/deploy.yaml | 17 --- .../test/deployment-pass/expected.json | 1 + .../input/deployment.yaml | 3 +- .../test/deployment/expected.json | 21 --- .../test/pod/expected.json | 4 - .../test/pod/input/pod.yaml | 6 +- 18 files changed, 178 insertions(+), 165 deletions(-) rename rules/non-root-containers/test/{cronjob-fixed-path => cronjob-runasuser}/expected.json (66%) rename rules/non-root-containers/test/{cronjob-fixed-path => cronjob-runasuser}/input/cronjob.yaml (79%) create mode 100644 rules/non-root-containers/test/deployment-fail/expected.json create mode 100644 rules/non-root-containers/test/deployment-fail/input/deployment.yaml delete mode 100644 rules/non-root-containers/test/deployment-fixed-path/expected.json delete mode 100644 rules/non-root-containers/test/deployment-fixed-path/input/deploy.yaml create mode 100644 rules/non-root-containers/test/deployment-pass/expected.json rename rules/non-root-containers/test/{deployment => deployment-pass}/input/deployment.yaml (86%) delete mode 100644 rules/non-root-containers/test/deployment/expected.json diff --git a/controls/C-0013-nonrootcontainers.json b/controls/C-0013-nonrootcontainers.json index ac86fbc44..ecf9a146f 100644 --- a/controls/C-0013-nonrootcontainers.json +++ b/controls/C-0013-nonrootcontainers.json @@ -7,12 +7,12 @@ ] }, "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. 
Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.", + "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext or container securityContext and use user ID 1000 or higher, or make sure that runAsNonRoot is true.", "rulesNames": [ "non-root-containers" ], - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. . Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser and runAsGroup are set to a user id greater than 0, or that runAsNonRoot is set to true. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", "controlID": "C-0013", "baseScore": 6.0, "example": "@controls/examples/c013.yaml", diff --git a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json index d6e3988f3..bd6c6f6fe 100644 --- a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json +++ b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json @@ -34,7 +34,8 @@ "set-fsgroup-value", "set-fsgroupchangepolicy-value", "set-sysctls-params", - "set-supplementalgroups-values" + "set-supplementalgroups-values", + "rule-allow-privilege-escalation" ], "baseScore": 8, "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", diff --git a/controls/examples/c013.yaml b/controls/examples/c013.yaml index 6b3ecf544..ce997a920 100644 --- a/controls/examples/c013.yaml +++ b/controls/examples/c013.yaml @@ -12,4 +12,4 @@ spec: image: busybox command: [ "sh", "-c", "sleep 1h" ] securityContext: - allowPrivilegeEscalation: false #lastly, we check this is set to false \ No newline at end of file + runAsNonRoot: false # alternatively, this can be runAsNonRoot: true \ No newline at end of file diff --git a/rules/non-root-containers/raw.rego b/rules/non-root-containers/raw.rego index 1b993a9bb..f682dd83b 100644 --- a/rules/non-root-containers/raw.rego +++ b/rules/non-root-containers/raw.rego @@ -9,17 +9,19 @@ deny[msga] { container := pod.spec.containers[i] start_of_path := "spec" - alertInfo := evaluate_workload_non_root_container(container, pod, start_of_path) - fixPath := get_fixed_path(alertInfo, i) - failed_path := get_failed_path(alertInfo, i) + run_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path) + run_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path) + all_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath) + count(all_fixpaths) > 0 + fixPaths := get_fixed_paths(all_fixpaths, i) msga := { "alertMessage": sprintf("container: %v in pod: %v may run as root", [container.name, pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixPath, + "reviewPaths": "", + "failedPaths": [], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": [pod] } @@ -34,16 +36,19 @@ deny[msga] { container := wl.spec.template.spec.containers[i] start_of_path := "spec.template.spec" - alertInfo := evaluate_workload_non_root_container(container, wl.spec.template, start_of_path) - fixPath := get_fixed_path(alertInfo, i) - failed_path := get_failed_path(alertInfo, i) + run_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path) + run_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path) + all_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath) + count(all_fixpaths) > 0 + fixPaths := get_fixed_paths(all_fixpaths, i) + msga := { - "alertMessage": sprintf("container :%v in %v: %v may run as root", [container.name, wl.kind, wl.metadata.name]), + "alertMessage": sprintf("container: %v in %v: %v may run as root", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixPath, + "reviewPaths": "", + "failedPaths": [], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": 
[wl] } @@ -57,39 +62,38 @@ deny[msga] { container = wl.spec.jobTemplate.spec.template.spec.containers[i] start_of_path := "spec.jobTemplate.spec.template.spec" - alertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, start_of_path) - fixPath := get_fixed_path(alertInfo, i) - failed_path := get_failed_path(alertInfo, i) + run_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path) + run_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path) + all_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath) + count(all_fixpaths) > 0 + fixPaths := get_fixed_paths(all_fixpaths, i) msga := { - "alertMessage": sprintf("container :%v in %v: %v may run as root", [container.name, wl.kind, wl.metadata.name]), + "alertMessage": sprintf("container: %v in %v: %v may run as root", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixPath, + "reviewPaths": "", + "failedPaths": [], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": [wl] } } } -get_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,"container_ndx",format_int(i,10))] { - alertInfo.failed_path != "" -} else = [] - -get_fixed_path(alertInfo, i) = [{"path":replace(alertInfo.fixPath[0].path,"container_ndx",format_int(i,10)), "value":alertInfo.fixPath[0].value}, {"path":replace(alertInfo.fixPath[1].path,"container_ndx",format_int(i,10)), "value":alertInfo.fixPath[1].value}]{ - count(alertInfo.fixPath) == 2 -} else = [{"path":replace(alertInfo.fixPath[0].path,"container_ndx",format_int(i,10)), "value":alertInfo.fixPath[0].value}] { - count(alertInfo.fixPath) == 1 -} else = [] +get_fixed_paths(all_fixpaths, i) = [{"path":replace(all_fixpaths[0].path,"container_ndx",format_int(i,10)), "value":all_fixpaths[0].value}, {"path":replace(all_fixpaths[1].path,"container_ndx",format_int(i,10)), "value":all_fixpaths[1].value}]{ + count(all_fixpaths) == 2 +} else = [{"path":replace(all_fixpaths[0].path,"container_ndx",format_int(i,10)), "value":all_fixpaths[0].value}] ################################################################################# # Workload evaluation -evaluate_workload_non_root_container(container, pod, start_of_path) = alertInfo { +# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000 +# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true +# all checks are both on the pod and the container level +evaluate_workload_run_as_user(container, pod, start_of_path) = fixPath { runAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path) runAsNonRootValue.value == false @@ -97,70 +101,51 @@ evaluate_workload_non_root_container(container, pod, start_of_path) = alertInfo runAsUserValue.value == 0 alertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue) -} else = alertInfo { - allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, start_of_path) - allowPrivilegeEscalationValue.value == true + fixPath := alertInfo.fixPath +} else = [] - alertInfo := allowPrivilegeEscalationValue -} + +# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000 +# all checks are both on the pod and the container level +evaluate_workload_run_as_group(container, pod, start_of_path) = fixPath { + runAsGroupValue := 
get_run_as_group_value(container, pod, start_of_path) + runAsGroupValue.value == 0 + + fixPath := runAsGroupValue.fixPath +} else = [] ################################################################################# # Value resolution functions -# TODO - refactor functions, can be simplified get_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot { - runAsNonRoot := {"value" : container.securityContext.runAsNonRoot, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}] ,"defined" : true} + runAsNonRoot := {"value" : container.securityContext.runAsNonRoot, "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}], "defined" : true} } else = runAsNonRoot { - runAsNonRoot := {"value" : pod.spec.securityContext.runAsNonRoot, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}], "defined" : true} -} else = {"value" : false, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}], "defined" : false} { - is_allow_privilege_escalation_field(container, pod) -} else = {"value" : false, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]) , "value":"true"}, {"path":sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : false} + runAsNonRoot := {"value" : pod.spec.securityContext.runAsNonRoot, "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}], "defined" : true} +} else = {"value" : false, "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]) , "value":"true"}], "defined" : false} get_run_as_user_value(container, pod, start_of_path) = runAsUser { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsUser", [start_of_path]) - runAsUser := {"value" : container.securityContext.runAsUser, "failed_path" : failed_path, "fixPath": [], "defined" : true} + path := sprintf("%v.containers[container_ndx].securityContext.runAsUser", [start_of_path]) + runAsUser := {"value" : container.securityContext.runAsUser, "fixPath": [{"path": path, "value": "1000"}], "defined" : true} } else = runAsUser { - failed_path := sprintf("%v.securityContext.runAsUser", [start_of_path]) - runAsUser := {"value" : pod.spec.securityContext.runAsUser, "failed_path" : failed_path, "fixPath": [],"defined" : true} -} else = {"value" : 0, "failed_path": "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}],"defined" : false}{ - is_allow_privilege_escalation_field(container, pod) -} else = {"value" : 0, "failed_path": "", - "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"},{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], + path := sprintf("%v.securityContext.runAsUser", [start_of_path]) + runAsUser := {"value" : pod.spec.securityContext.runAsUser, "fixPath": [{"path": path, "value": "1000"}],"defined" : true} +} else = {"value" : 0, "fixPath": [{"path": 
sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}], "defined" : false} get_run_as_group_value(container, pod, start_of_path) = runAsGroup { - failed_path := sprintf("%v.containers[container_ndx].securityContext.runAsGroup", [start_of_path]) - runAsGroup := {"value" : container.securityContext.runAsGroup, "failed_path" : failed_path, "fixPath": [],"defined" : true} + path := sprintf("%v.containers[container_ndx].securityContext.runAsGroup", [start_of_path]) + runAsGroup := {"value" : container.securityContext.runAsGroup, "fixPath": [{"path": path, "value": "1000"}],"defined" : true} } else = runAsGroup { - failed_path := sprintf("%v.securityContext.runAsGroup", [start_of_path]) - runAsGroup := {"value" : pod.spec.securityContext.runAsGroup, "failed_path" : failed_path, "fixPath":[], "defined" : true} -} else = {"value" : 0, "failed_path": "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"}], "defined" : false}{ - is_allow_privilege_escalation_field(container, pod) -} else = {"value" : 0, "failed_path": "", - "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsNonRoot", [start_of_path]), "value":"true"},{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], + path := sprintf("%v.securityContext.runAsGroup", [start_of_path]) + runAsGroup := {"value" : pod.spec.securityContext.runAsGroup, "fixPath":[{"path": path, "value": "1000"}], "defined" : true} +} else = {"value" : 0, "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.runAsGroup", [start_of_path]), "value":"1000"}], "defined" : false } -get_allow_privilege_escalation(container, pod, start_of_path) = allowPrivilegeEscalation { - allowPrivilegeEscalation := {"value" : container.securityContext.allowPrivilegeEscalation, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : true} -} else = allowPrivilegeEscalation { - allowPrivilegeEscalation := {"value" : pod.spec.securityContext.allowPrivilegeEscalation, "failed_path" : "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : true} -} else = {"value" : true, "failed_path": "", "fixPath": [{"path": sprintf("%v.containers[container_ndx].securityContext.allowPrivilegeEscalation", [start_of_path]), "value":"false"}], "defined" : false} - choose_first_if_defined(l1, l2) = c { l1.defined c := l1 } else = l2 - -is_allow_privilege_escalation_field(container, pod) { - container.securityContext.allowPrivilegeEscalation == false -} - -is_allow_privilege_escalation_field(container, pod) { - pod.spec.securityContext.allowPrivilegeEscalation == false -} - - diff --git a/rules/non-root-containers/rule.metadata.json b/rules/non-root-containers/rule.metadata.json index e43d260cb..315d4b6d0 100644 --- a/rules/non-root-containers/rule.metadata.json +++ b/rules/non-root-containers/rule.metadata.json @@ -45,6 +45,6 @@ "ruleDependencies": [ ], "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. 
Also make sure that the allowPrivilegeEscalation field is set to false", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", "ruleQuery": "armo_builtins" } \ No newline at end of file diff --git a/rules/non-root-containers/test/cronjob-fixed-path/expected.json b/rules/non-root-containers/test/cronjob-runasuser/expected.json similarity index 66% rename from rules/non-root-containers/test/cronjob-fixed-path/expected.json rename to rules/non-root-containers/test/cronjob-runasuser/expected.json index 435eacac0..a1ff2e9dd 100644 --- a/rules/non-root-containers/test/cronjob-fixed-path/expected.json +++ b/rules/non-root-containers/test/cronjob-runasuser/expected.json @@ -1,13 +1,14 @@ [{ - "alertMessage": "container :hello in CronJob: hello may run as root", + "alertMessage": "container: hello in CronJob: hello may run as root", "reviewPaths": [], "failedPaths": [], "fixPaths": [{ - "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsNonRoot", - "value": "true" - }, { - "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation", - "value": "false" + "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsUser", + "value": "1000" + }, + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsGroup", + "value": "1000" }], "ruleStatus": "", "packagename": "armo_builtins", @@ -22,15 +23,12 @@ }] } }, { - "alertMessage": "container :hello2 in CronJob: hello may run as root", + "alertMessage": "container: hello2 in CronJob: hello may run as root", "reviewPaths": [], "failedPaths": [], "fixPaths": [{ - "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsNonRoot", - "value": "true" - }, { - "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", - "value": "false" + "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsGroup", + "value": "1000" }], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/non-root-containers/test/cronjob-fixed-path/input/cronjob.yaml b/rules/non-root-containers/test/cronjob-runasuser/input/cronjob.yaml similarity index 79% rename from rules/non-root-containers/test/cronjob-fixed-path/input/cronjob.yaml rename to rules/non-root-containers/test/cronjob-runasuser/input/cronjob.yaml index d5c08bac8..4bcd36b47 100644 --- a/rules/non-root-containers/test/cronjob-fixed-path/input/cronjob.yaml +++ b/rules/non-root-containers/test/cronjob-runasuser/input/cronjob.yaml @@ -16,5 +16,9 @@ spec: - /bin/sh - -c - date; echo Hello from the Kubernetes cluster + securityContext: + runAsUser: 0 - name: hello2 + securityContext: + runAsUser: 1000 restartPolicy: OnFailure \ No newline at end of file diff --git a/rules/non-root-containers/test/cronjob/expected.json b/rules/non-root-containers/test/cronjob/expected.json index 979c609ea..5b499aed2 100644 --- a/rules/non-root-containers/test/cronjob/expected.json +++ b/rules/non-root-containers/test/cronjob/expected.json @@ -1,13 +1,14 @@ [{ - "alertMessage": "container :hello in CronJob: hello may run as root", + "alertMessage": "container: hello in CronJob: hello may run as root", "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsNonRoot", "value": "true" - }, { - "path": 
"spec.jobTemplate.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation", - "value": "false" + }, + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.runAsGroup", + "value": "1000" }], "ruleStatus": "", "packagename": "armo_builtins", @@ -22,12 +23,16 @@ }] } }, { - "alertMessage": "container :hello2 in CronJob: hello may run as root", + "alertMessage": "container: hello2 in CronJob: hello may run as root", "reviewPaths": [], "failedPaths": [], "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsNonRoot", "value": "true" + }, + { + "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.runAsGroup", + "value": "1000" }], "ruleStatus": "", "packagename": "armo_builtins", diff --git a/rules/non-root-containers/test/cronjob/input/cronjob.yaml b/rules/non-root-containers/test/cronjob/input/cronjob.yaml index de23a020b..c2befc84d 100644 --- a/rules/non-root-containers/test/cronjob/input/cronjob.yaml +++ b/rules/non-root-containers/test/cronjob/input/cronjob.yaml @@ -8,6 +8,8 @@ spec: spec: template: spec: + securityContext: + runAsNonRoot: false containers: - name: hello image: busybox diff --git a/rules/non-root-containers/test/deployment-fail/expected.json b/rules/non-root-containers/test/deployment-fail/expected.json new file mode 100644 index 000000000..1a41e40ba --- /dev/null +++ b/rules/non-root-containers/test/deployment-fail/expected.json @@ -0,0 +1,48 @@ +[{ + "alertMessage": "container: nginx in Deployment: nginx-deployment may run as root", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [{ + "path": "spec.template.spec.containers[0].securityContext.runAsNonRoot", + "value": "true" + }], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "app": "nginx" + }, + "name": "nginx-deployment" + } + }] + } +}, +{ + "alertMessage": "container: nginx2 in Deployment: nginx-deployment may run as root", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [{ + "path": "spec.template.spec.containers[1].securityContext.runAsGroup", + "value": "1000" + }], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "app": "nginx" + }, + "name": "nginx-deployment" + } + }] + } +}] \ No newline at end of file diff --git a/rules/non-root-containers/test/deployment-fail/input/deployment.yaml b/rules/non-root-containers/test/deployment-fail/input/deployment.yaml new file mode 100644 index 000000000..286857f11 --- /dev/null +++ b/rules/non-root-containers/test/deployment-fail/input/deployment.yaml @@ -0,0 +1,32 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + securityContext: + runAsNonRoot: true + containers: + - name: nginx + securityContext: + runAsNonRoot: false + runAsGroup: 2000 + image: nginx:1.14.2 + ports: + - containerPort: 80 + - name: nginx2 + securityContext: + runAsGroup: 0 + image: nginx:1.14.2 + ports: + - containerPort: 80 \ No newline at end of file diff --git a/rules/non-root-containers/test/deployment-fixed-path/expected.json b/rules/non-root-containers/test/deployment-fixed-path/expected.json deleted file mode 100644 
index 3b8482116..000000000 --- a/rules/non-root-containers/test/deployment-fixed-path/expected.json +++ /dev/null @@ -1,24 +0,0 @@ -[{ - "alertMessage": "container: web in pod: static-web may run as root", - "reviewPaths": [], - "failedPaths": [], - "fixPaths": [{ - "path": "spec.containers[0].securityContext.runAsNonRoot", - "value": "true" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "labels": { - "role": "myrole" - }, - "name": "static-web" - } - }] - } -}] \ No newline at end of file diff --git a/rules/non-root-containers/test/deployment-fixed-path/input/deploy.yaml b/rules/non-root-containers/test/deployment-fixed-path/input/deploy.yaml deleted file mode 100644 index fef5c078f..000000000 --- a/rules/non-root-containers/test/deployment-fixed-path/input/deploy.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: static-web - labels: - role: myrole -spec: - securityContext: - allowPrivilegeEscalation: false - runAsGroup: 1 - containers: - - name: web - image: nginx - ports: - - name: web - containerPort: 80 - protocol: TCP \ No newline at end of file diff --git a/rules/non-root-containers/test/deployment-pass/expected.json b/rules/non-root-containers/test/deployment-pass/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/non-root-containers/test/deployment-pass/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/non-root-containers/test/deployment/input/deployment.yaml b/rules/non-root-containers/test/deployment-pass/input/deployment.yaml similarity index 86% rename from rules/non-root-containers/test/deployment/input/deployment.yaml rename to rules/non-root-containers/test/deployment-pass/input/deployment.yaml index fc8442f6b..7268e27ac 100644 --- a/rules/non-root-containers/test/deployment/input/deployment.yaml +++ b/rules/non-root-containers/test/deployment-pass/input/deployment.yaml @@ -17,7 +17,8 @@ spec: containers: - name: nginx securityContext: - runAsUser: 0 + runAsNonRoot: true + runAsGroup: 2000 image: nginx:1.14.2 ports: - containerPort: 80 \ No newline at end of file diff --git a/rules/non-root-containers/test/deployment/expected.json b/rules/non-root-containers/test/deployment/expected.json deleted file mode 100644 index 4b8126ff6..000000000 --- a/rules/non-root-containers/test/deployment/expected.json +++ /dev/null @@ -1,21 +0,0 @@ -[{ - "alertMessage": "container :nginx in Deployment: nginx-deployment may run as root", - "reviewPaths": ["spec.template.spec.containers[0].securityContext.runAsUser"], - "failedPaths": ["spec.template.spec.containers[0].securityContext.runAsUser"], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "app": "nginx" - }, - "name": "nginx-deployment" - } - }] - } -}] \ No newline at end of file diff --git a/rules/non-root-containers/test/pod/expected.json b/rules/non-root-containers/test/pod/expected.json index 5687695ac..c9c230d15 100644 --- a/rules/non-root-containers/test/pod/expected.json +++ b/rules/non-root-containers/test/pod/expected.json @@ -7,10 +7,6 @@ { "path": "spec.containers[0].securityContext.runAsNonRoot", "value": "true" - }, - { - "path": "spec.containers[0].securityContext.allowPrivilegeEscalation", - "value": "false" } ], "ruleStatus": "", diff 
--git a/rules/non-root-containers/test/pod/input/pod.yaml b/rules/non-root-containers/test/pod/input/pod.yaml index cf9f1aac4..cbf23e96b 100644 --- a/rules/non-root-containers/test/pod/input/pod.yaml +++ b/rules/non-root-containers/test/pod/input/pod.yaml @@ -6,11 +6,13 @@ metadata: role: myrole spec: securityContext: - allowPrivilegeEscalation: true + runAsNonRoot: false containers: - name: web image: nginx ports: - name: web containerPort: 80 - protocol: TCP \ No newline at end of file + protocol: TCP + securityContext: + runAsGroup: 1000 \ No newline at end of file From bf27b2699e169e74ca9bcd94fe82e626c66b0e1f Mon Sep 17 00:00:00 2001 From: Ben Date: Tue, 23 Jan 2024 09:44:21 +0200 Subject: [PATCH 086/195] Fixing C-0079 to properly detect CVE-2022-0185 on azure Signed-off-by: Ben --- rules/CVE-2022-0185/raw.rego | 40 +- .../test/test_azure_fail/expected.json | 553 ++++++++++++++++++ .../input/kernelvars.json | 0 .../test/test_azure_fail/input/node.json | 264 +++++++++ .../test/test_azure_pass/expected.json | 1 + .../test_azure_pass/input/kernelvars.json | 536 +++++++++++++++++ .../test/test_azure_pass/input/node.json | 264 +++++++++ .../{test => test_generic_fail}/expected.json | 0 .../test_generic_fail/input/kernelvars.json | 536 +++++++++++++++++ .../input/node.json | 0 .../test/test_generic_pass/expected.json | 1 + .../test_generic_pass/input/kernelvars.json | 536 +++++++++++++++++ .../test/test_generic_pass/input/node.json | 264 +++++++++ 13 files changed, 2988 insertions(+), 7 deletions(-) create mode 100644 rules/CVE-2022-0185/test/test_azure_fail/expected.json rename rules/CVE-2022-0185/test/{test => test_azure_fail}/input/kernelvars.json (100%) create mode 100644 rules/CVE-2022-0185/test/test_azure_fail/input/node.json create mode 100644 rules/CVE-2022-0185/test/test_azure_pass/expected.json create mode 100644 rules/CVE-2022-0185/test/test_azure_pass/input/kernelvars.json create mode 100644 rules/CVE-2022-0185/test/test_azure_pass/input/node.json rename rules/CVE-2022-0185/test/{test => test_generic_fail}/expected.json (100%) create mode 100644 rules/CVE-2022-0185/test/test_generic_fail/input/kernelvars.json rename rules/CVE-2022-0185/test/{test => test_generic_fail}/input/node.json (100%) create mode 100644 rules/CVE-2022-0185/test/test_generic_pass/expected.json create mode 100644 rules/CVE-2022-0185/test/test_generic_pass/input/kernelvars.json create mode 100644 rules/CVE-2022-0185/test/test_generic_pass/input/node.json diff --git a/rules/CVE-2022-0185/raw.rego b/rules/CVE-2022-0185/raw.rego index 912ed4a0b..3dc72ea19 100644 --- a/rules/CVE-2022-0185/raw.rego +++ b/rules/CVE-2022-0185/raw.rego @@ -3,14 +3,11 @@ package armo_builtins deny[msga] { node := input[_] node.kind == "Node" - kernel_version_match := regex.find_all_string_submatch_n(`[0-9]+\.[0-9]+\.[0-9]+`, node.status.nodeInfo.kernelVersion, -1) - kernelVersion := kernel_version_match[0][0] - kernel_version_arr := split(kernelVersion, ".") - to_number(kernel_version_arr[0]) == 5 - to_number(kernel_version_arr[1]) >= 1 - to_number(kernel_version_arr[1]) <= 16 - to_number(kernel_version_arr[2]) < 2 + parsed_kernel_version_arr := parse_kernel_version_to_array(node.status.nodeInfo.kernelVersion) + is_azure := parsed_kernel_version_arr[4] == "azure" + + is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) node.status.nodeInfo.operatingSystem == "linux" path := "status.nodeInfo.kernelVersion" @@ -40,7 +37,36 @@ deny[msga] { } } +# General Kernel versions are between 5.1.1 and 5.16.2 
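+# Worked example (illustrative): parse_kernel_version_to_array maps a kernelVersion
+# string such as "5.13.0-39-generic" to [5, 13, 0, 39, "generic"] and
+# "5.4.0-1059-azure" to [5, 4, 0, 1059, "azure"]. The generic branch below flags
+# 5.x kernels with minor 1-16 and patch < 2; the azure branch flags 5.1-5.4 kernels
+# with patch 0 and a build number below 1067 (so "5.4.0-1067-azure" passes, as in
+# the test_azure_pass fixture).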
+is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) { + is_azure == false + parsed_kernel_version_arr[0] == 5 + parsed_kernel_version_arr[1] >= 1 + parsed_kernel_version_arr[1] <= 16 + parsed_kernel_version_arr[2] < 2 +} + +# Azure kernel version with is 5.4.0-1067-azure +is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) { + is_azure == true + parsed_kernel_version_arr[0] == 5 + parsed_kernel_version_arr[1] >= 1 + parsed_kernel_version_arr[1] <= 4 + parsed_kernel_version_arr[2] == 0 + parsed_kernel_version_arr[3] < 1067 +} + is_unprivileged_userns_clone_enabled(linux_kernel_var) { linux_kernel_var.key == "unprivileged_userns_clone" linux_kernel_var.value == "1\n" +} + +parse_kernel_version_to_array(kernel_version_str) = output { + version_triplet := regex.find_n(`(\d+\.\d+\.\d+)`, kernel_version_str,-1) + version_triplet_array := split(version_triplet[0],".") + + build_vendor := regex.find_n(`-(\d+)-(\w+)`, kernel_version_str,-1) + build_vendor_array := split(build_vendor[0],"-") + + output := [to_number(version_triplet_array[0]),to_number(version_triplet_array[1]),to_number(version_triplet_array[2]),to_number(build_vendor_array[1]),build_vendor_array[2]] } \ No newline at end of file diff --git a/rules/CVE-2022-0185/test/test_azure_fail/expected.json b/rules/CVE-2022-0185/test/test_azure_fail/expected.json new file mode 100644 index 000000000..58e753420 --- /dev/null +++ b/rules/CVE-2022-0185/test/test_azure_fail/expected.json @@ -0,0 +1,553 @@ +[{ + "alertMessage": "You are vulnerable to CVE-2022-0185", + "reviewPaths": ["kernelVersion"], + "failedPaths": ["kernelVersion"], + "fixPaths": [], + "ruleStatus": "", + "packagename": "", + "alertScore": 0, + "alertObject": { + "externalObjects": { + "kernelVersion": "5.4.0-1059-azure", + "kind": "Node", + "name": "minikube", + "namespace": "", + "relatedObjects": [{ + "apiVersion": "hostdata.kubescape.cloud/v1beta0", + "data": [{ + "key": "acct", + "source": "/proc/sys/kernel/acct", + "value": "4\t2\t30\n" + }, { + "key": "acpi_video_flags", + "source": "/proc/sys/kernel/acpi_video_flags", + "value": "0\n" + }, { + "key": "apparmor_display_secid_mode", + "source": "/proc/sys/kernel/apparmor_display_secid_mode", + "value": "0\n" + }, { + "key": "auto_msgmni", + "source": "/proc/sys/kernel/auto_msgmni", + "value": "0\n" + }, { + "key": "bootloader_type", + "source": "/proc/sys/kernel/bootloader_type", + "value": "114\n" + }, { + "key": "bootloader_version", + "source": "/proc/sys/kernel/bootloader_version", + "value": "2\n" + }, { + "key": "bpf_stats_enabled", + "source": "/proc/sys/kernel/bpf_stats_enabled", + "value": "0\n" + }, { + "key": "cad_pid", + "source": "/proc/sys/kernel/cad_pid", + "value": "0\n" + }, { + "key": "cap_last_cap", + "source": "/proc/sys/kernel/cap_last_cap", + "value": "40\n" + }, { + "key": "core_pattern", + "source": "/proc/sys/kernel/core_pattern", + "value": "|/usr/share/apport/apport %p %s %c %d %P %E\n" + }, { + "key": "core_pipe_limit", + "source": "/proc/sys/kernel/core_pipe_limit", + "value": "0\n" + }, { + "key": "core_uses_pid", + "source": "/proc/sys/kernel/core_uses_pid", + "value": "0\n" + }, { + "key": "ctrl-alt-del", + "source": "/proc/sys/kernel/ctrl-alt-del", + "value": "0\n" + }, { + "key": "dmesg_restrict", + "source": "/proc/sys/kernel/dmesg_restrict", + "value": "0\n" + }, { + "key": "domainname", + "source": "/proc/sys/kernel/domainname", + "value": "(none)\n" + }, { + "key": "force_sysfs_fallback", + "source": "/proc/sys/kernel/firmware_config/force_sysfs_fallback", 
+ "value": "0\n" + }, { + "key": "ignore_sysfs_fallback", + "source": "/proc/sys/kernel/firmware_config/ignore_sysfs_fallback", + "value": "0\n" + }, { + "key": "ftrace_dump_on_oops", + "source": "/proc/sys/kernel/ftrace_dump_on_oops", + "value": "0\n" + }, { + "key": "ftrace_enabled", + "source": "/proc/sys/kernel/ftrace_enabled", + "value": "1\n" + }, { + "key": "hardlockup_all_cpu_backtrace", + "source": "/proc/sys/kernel/hardlockup_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "hardlockup_panic", + "source": "/proc/sys/kernel/hardlockup_panic", + "value": "0\n" + }, { + "key": "hostname", + "source": "/proc/sys/kernel/hostname", + "value": "minikube\n" + }, { + "key": "hotplug", + "source": "/proc/sys/kernel/hotplug", + "value": "\n" + }, { + "key": "hung_task_all_cpu_backtrace", + "source": "/proc/sys/kernel/hung_task_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "hung_task_check_count", + "source": "/proc/sys/kernel/hung_task_check_count", + "value": "4194304\n" + }, { + "key": "hung_task_check_interval_secs", + "source": "/proc/sys/kernel/hung_task_check_interval_secs", + "value": "0\n" + }, { + "key": "hung_task_panic", + "source": "/proc/sys/kernel/hung_task_panic", + "value": "0\n" + }, { + "key": "hung_task_timeout_secs", + "source": "/proc/sys/kernel/hung_task_timeout_secs", + "value": "120\n" + }, { + "key": "hung_task_warnings", + "source": "/proc/sys/kernel/hung_task_warnings", + "value": "10\n" + }, { + "key": "io_delay_type", + "source": "/proc/sys/kernel/io_delay_type", + "value": "1\n" + }, { + "key": "kexec_load_disabled", + "source": "/proc/sys/kernel/kexec_load_disabled", + "value": "0\n" + }, { + "key": "gc_delay", + "source": "/proc/sys/kernel/keys/gc_delay", + "value": "300\n" + }, { + "key": "maxbytes", + "source": "/proc/sys/kernel/keys/maxbytes", + "value": "20000\n" + }, { + "key": "maxkeys", + "source": "/proc/sys/kernel/keys/maxkeys", + "value": "200\n" + }, { + "key": "persistent_keyring_expiry", + "source": "/proc/sys/kernel/keys/persistent_keyring_expiry", + "value": "259200\n" + }, { + "key": "root_maxbytes", + "source": "/proc/sys/kernel/keys/root_maxbytes", + "value": "25000000\n" + }, { + "key": "root_maxkeys", + "source": "/proc/sys/kernel/keys/root_maxkeys", + "value": "1000000\n" + }, { + "key": "kptr_restrict", + "source": "/proc/sys/kernel/kptr_restrict", + "value": "1\n" + }, { + "key": "max_lock_depth", + "source": "/proc/sys/kernel/max_lock_depth", + "value": "1024\n" + }, { + "key": "max_rcu_stall_to_panic", + "source": "/proc/sys/kernel/max_rcu_stall_to_panic", + "value": "0\n" + }, { + "key": "modprobe", + "source": "/proc/sys/kernel/modprobe", + "value": "/sbin/modprobe\n" + }, { + "key": "modules_disabled", + "source": "/proc/sys/kernel/modules_disabled", + "value": "0\n" + }, { + "key": "msg_next_id", + "source": "/proc/sys/kernel/msg_next_id", + "value": "-1\n" + }, { + "key": "msgmax", + "source": "/proc/sys/kernel/msgmax", + "value": "8192\n" + }, { + "key": "msgmnb", + "source": "/proc/sys/kernel/msgmnb", + "value": "16384\n" + }, { + "key": "msgmni", + "source": "/proc/sys/kernel/msgmni", + "value": "32000\n" + }, { + "key": "ngroups_max", + "source": "/proc/sys/kernel/ngroups_max", + "value": "65536\n" + }, { + "key": "nmi_watchdog", + "source": "/proc/sys/kernel/nmi_watchdog", + "value": "0\n" + }, { + "key": "ns_last_pid", + "source": "/proc/sys/kernel/ns_last_pid", + "value": "17618\n" + }, { + "key": "numa_balancing", + "source": "/proc/sys/kernel/numa_balancing", + "value": "0\n" + }, { + "key": 
"oops_all_cpu_backtrace", + "source": "/proc/sys/kernel/oops_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "osrelease", + "source": "/proc/sys/kernel/osrelease", + "value": "5.13.0-39-generic\n" + }, { + "key": "ostype", + "source": "/proc/sys/kernel/ostype", + "value": "Linux\n" + }, { + "key": "overflowgid", + "source": "/proc/sys/kernel/overflowgid", + "value": "65534\n" + }, { + "key": "overflowuid", + "source": "/proc/sys/kernel/overflowuid", + "value": "65534\n" + }, { + "key": "panic", + "source": "/proc/sys/kernel/panic", + "value": "10\n" + }, { + "key": "panic_on_io_nmi", + "source": "/proc/sys/kernel/panic_on_io_nmi", + "value": "0\n" + }, { + "key": "panic_on_oops", + "source": "/proc/sys/kernel/panic_on_oops", + "value": "1\n" + }, { + "key": "panic_on_rcu_stall", + "source": "/proc/sys/kernel/panic_on_rcu_stall", + "value": "0\n" + }, { + "key": "panic_on_unrecovered_nmi", + "source": "/proc/sys/kernel/panic_on_unrecovered_nmi", + "value": "0\n" + }, { + "key": "panic_on_warn", + "source": "/proc/sys/kernel/panic_on_warn", + "value": "0\n" + }, { + "key": "panic_print", + "source": "/proc/sys/kernel/panic_print", + "value": "0\n" + }, { + "key": "perf_cpu_time_max_percent", + "source": "/proc/sys/kernel/perf_cpu_time_max_percent", + "value": "25\n" + }, { + "key": "perf_event_max_contexts_per_stack", + "source": "/proc/sys/kernel/perf_event_max_contexts_per_stack", + "value": "8\n" + }, { + "key": "perf_event_max_sample_rate", + "source": "/proc/sys/kernel/perf_event_max_sample_rate", + "value": "100000\n" + }, { + "key": "perf_event_max_stack", + "source": "/proc/sys/kernel/perf_event_max_stack", + "value": "127\n" + }, { + "key": "perf_event_mlock_kb", + "source": "/proc/sys/kernel/perf_event_mlock_kb", + "value": "516\n" + }, { + "key": "perf_event_paranoid", + "source": "/proc/sys/kernel/perf_event_paranoid", + "value": "4\n" + }, { + "key": "pid_max", + "source": "/proc/sys/kernel/pid_max", + "value": "4194304\n" + }, { + "key": "poweroff_cmd", + "source": "/proc/sys/kernel/poweroff_cmd", + "value": "/sbin/poweroff\n" + }, { + "key": "print-fatal-signals", + "source": "/proc/sys/kernel/print-fatal-signals", + "value": "0\n" + }, { + "key": "printk", + "source": "/proc/sys/kernel/printk", + "value": "4\t4\t1\t7\n" + }, { + "key": "printk_delay", + "source": "/proc/sys/kernel/printk_delay", + "value": "0\n" + }, { + "key": "printk_devkmsg", + "source": "/proc/sys/kernel/printk_devkmsg", + "value": "on\n" + }, { + "key": "printk_ratelimit", + "source": "/proc/sys/kernel/printk_ratelimit", + "value": "5\n" + }, { + "key": "printk_ratelimit_burst", + "source": "/proc/sys/kernel/printk_ratelimit_burst", + "value": "10\n" + }, { + "key": "max", + "source": "/proc/sys/kernel/pty/max", + "value": "4096\n" + }, { + "key": "nr", + "source": "/proc/sys/kernel/pty/nr", + "value": "4\n" + }, { + "key": "reserve", + "source": "/proc/sys/kernel/pty/reserve", + "value": "1024\n" + }, { + "key": "boot_id", + "source": "/proc/sys/kernel/random/boot_id", + "value": "a025a04b-23a2-44b6-aa3a-2b3d3650bcbb\n" + }, { + "key": "entropy_avail", + "source": "/proc/sys/kernel/random/entropy_avail", + "value": "3806\n" + }, { + "key": "poolsize", + "source": "/proc/sys/kernel/random/poolsize", + "value": "4096\n" + }, { + "key": "urandom_min_reseed_secs", + "source": "/proc/sys/kernel/random/urandom_min_reseed_secs", + "value": "60\n" + }, { + "key": "uuid", + "source": "/proc/sys/kernel/random/uuid", + "value": "7b6b5bf9-9af4-49db-aba6-f0be1c57e2b8\n" + }, { + "key": "write_wakeup_threshold", 
+ "source": "/proc/sys/kernel/random/write_wakeup_threshold", + "value": "896\n" + }, { + "key": "randomize_va_space", + "source": "/proc/sys/kernel/randomize_va_space", + "value": "2\n" + }, { + "key": "real-root-dev", + "source": "/proc/sys/kernel/real-root-dev", + "value": "0\n" + }, { + "key": "sched_autogroup_enabled", + "source": "/proc/sys/kernel/sched_autogroup_enabled", + "value": "1\n" + }, { + "key": "sched_cfs_bandwidth_slice_us", + "source": "/proc/sys/kernel/sched_cfs_bandwidth_slice_us", + "value": "5000\n" + }, { + "key": "sched_child_runs_first", + "source": "/proc/sys/kernel/sched_child_runs_first", + "value": "0\n" + }, { + "key": "sched_deadline_period_max_us", + "source": "/proc/sys/kernel/sched_deadline_period_max_us", + "value": "4194304\n" + }, { + "key": "sched_deadline_period_min_us", + "source": "/proc/sys/kernel/sched_deadline_period_min_us", + "value": "100\n" + }, { + "key": "sched_energy_aware", + "source": "/proc/sys/kernel/sched_energy_aware", + "value": "1\n" + }, { + "key": "sched_rr_timeslice_ms", + "source": "/proc/sys/kernel/sched_rr_timeslice_ms", + "value": "100\n" + }, { + "key": "sched_rt_period_us", + "source": "/proc/sys/kernel/sched_rt_period_us", + "value": "1000000\n" + }, { + "key": "sched_rt_runtime_us", + "source": "/proc/sys/kernel/sched_rt_runtime_us", + "value": "950000\n" + }, { + "key": "sched_schedstats", + "source": "/proc/sys/kernel/sched_schedstats", + "value": "0\n" + }, { + "key": "sched_util_clamp_max", + "source": "/proc/sys/kernel/sched_util_clamp_max", + "value": "1024\n" + }, { + "key": "sched_util_clamp_min", + "source": "/proc/sys/kernel/sched_util_clamp_min", + "value": "1024\n" + }, { + "key": "sched_util_clamp_min_rt_default", + "source": "/proc/sys/kernel/sched_util_clamp_min_rt_default", + "value": "1024\n" + }, { + "key": "actions_avail", + "source": "/proc/sys/kernel/seccomp/actions_avail", + "value": "kill_process kill_thread trap errno user_notif trace log allow\n" + }, { + "key": "actions_logged", + "source": "/proc/sys/kernel/seccomp/actions_logged", + "value": "kill_process kill_thread trap errno user_notif trace log\n" + }, { + "key": "sem", + "source": "/proc/sys/kernel/sem", + "value": "32000\t1024000000\t500\t32000\n" + }, { + "key": "sem_next_id", + "source": "/proc/sys/kernel/sem_next_id", + "value": "-1\n" + }, { + "key": "sg-big-buff", + "source": "/proc/sys/kernel/sg-big-buff", + "value": "32768\n" + }, { + "key": "shm_next_id", + "source": "/proc/sys/kernel/shm_next_id", + "value": "-1\n" + }, { + "key": "shm_rmid_forced", + "source": "/proc/sys/kernel/shm_rmid_forced", + "value": "0\n" + }, { + "key": "shmall", + "source": "/proc/sys/kernel/shmall", + "value": "18446744073692774399\n" + }, { + "key": "shmmax", + "source": "/proc/sys/kernel/shmmax", + "value": "18446744073692774399\n" + }, { + "key": "shmmni", + "source": "/proc/sys/kernel/shmmni", + "value": "4096\n" + }, { + "key": "soft_watchdog", + "source": "/proc/sys/kernel/soft_watchdog", + "value": "1\n" + }, { + "key": "softlockup_all_cpu_backtrace", + "source": "/proc/sys/kernel/softlockup_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "softlockup_panic", + "source": "/proc/sys/kernel/softlockup_panic", + "value": "0\n" + }, { + "key": "stack_tracer_enabled", + "source": "/proc/sys/kernel/stack_tracer_enabled", + "value": "0\n" + }, { + "key": "sysctl_writes_strict", + "source": "/proc/sys/kernel/sysctl_writes_strict", + "value": "1\n" + }, { + "key": "sysrq", + "source": "/proc/sys/kernel/sysrq", + "value": "176\n" + }, { + "key": 
"tainted", + "source": "/proc/sys/kernel/tainted", + "value": "12288\n" + }, { + "key": "threads-max", + "source": "/proc/sys/kernel/threads-max", + "value": "80984\n" + }, { + "key": "timer_migration", + "source": "/proc/sys/kernel/timer_migration", + "value": "1\n" + }, { + "key": "traceoff_on_warning", + "source": "/proc/sys/kernel/traceoff_on_warning", + "value": "0\n" + }, { + "key": "tracepoint_printk", + "source": "/proc/sys/kernel/tracepoint_printk", + "value": "0\n" + }, { + "key": "unknown_nmi_panic", + "source": "/proc/sys/kernel/unknown_nmi_panic", + "value": "0\n" + }, { + "key": "unprivileged_bpf_disabled", + "source": "/proc/sys/kernel/unprivileged_bpf_disabled", + "value": "2\n" + }, { + "key": "unprivileged_userns_apparmor_policy", + "source": "/proc/sys/kernel/unprivileged_userns_apparmor_policy", + "value": "1\n" + }, { + "key": "unprivileged_userns_clone", + "source": "/proc/sys/kernel/unprivileged_userns_clone", + "value": "1\n" + }, { + "key": "bset", + "source": "/proc/sys/kernel/usermodehelper/bset", + "value": "4294967295\t511\n" + }, { + "key": "inheritable", + "source": "/proc/sys/kernel/usermodehelper/inheritable", + "value": "4294967295\t511\n" + }, { + "key": "version", + "source": "/proc/sys/kernel/version", + "value": "#44~20.04.1-Ubuntu SMP Thu Mar 24 16:43:35 UTC 2022\n" + }, { + "key": "watchdog", + "source": "/proc/sys/kernel/watchdog", + "value": "1\n" + }, { + "key": "watchdog_cpumask", + "source": "/proc/sys/kernel/watchdog_cpumask", + "value": "0-3\n" + }, { + "key": "watchdog_thresh", + "source": "/proc/sys/kernel/watchdog_thresh", + "value": "10\n" + }, { + "key": "ptrace_scope", + "source": "/proc/sys/kernel/yama/ptrace_scope", + "value": "1\n" + }], + "kind": "LinuxKernelVariables", + "metadata": { + "name": "minikube" + } + }] + } + } +}] \ No newline at end of file diff --git a/rules/CVE-2022-0185/test/test/input/kernelvars.json b/rules/CVE-2022-0185/test/test_azure_fail/input/kernelvars.json similarity index 100% rename from rules/CVE-2022-0185/test/test/input/kernelvars.json rename to rules/CVE-2022-0185/test/test_azure_fail/input/kernelvars.json diff --git a/rules/CVE-2022-0185/test/test_azure_fail/input/node.json b/rules/CVE-2022-0185/test/test_azure_fail/input/node.json new file mode 100644 index 000000000..f24295023 --- /dev/null +++ b/rules/CVE-2022-0185/test/test_azure_fail/input/node.json @@ -0,0 +1,264 @@ +{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-04-26T05:54:17Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube", + "kubernetes.io/os": "linux", + "minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d", + "minikube.k8s.io/name": "minikube", + "minikube.k8s.io/updated_at": "2022_04_26T08_54_20_0700", + "minikube.k8s.io/version": "v1.25.1", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "managedFields": [{ + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:annotations": { + ".": {}, + "f:kubeadm.alpha.kubernetes.io/cri-socket": {}, + "f:volumes.kubernetes.io/controller-managed-attach-detach": {} + }, + "f:labels": { + ".": {}, 
+ "f:beta.kubernetes.io/arch": {}, + "f:beta.kubernetes.io/os": {}, + "f:kubernetes.io/arch": {}, + "f:kubernetes.io/hostname": {}, + "f:kubernetes.io/os": {}, + "f:node-role.kubernetes.io/control-plane": {}, + "f:node-role.kubernetes.io/master": {}, + "f:node.kubernetes.io/exclude-from-external-load-balancers": {} + } + } + }, + "manager": "Go-http-client", + "operation": "Update", + "time": "2022-04-26T05:54:20Z" + }, { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:labels": { + "f:minikube.k8s.io/commit": {}, + "f:minikube.k8s.io/name": {}, + "f:minikube.k8s.io/updated_at": {}, + "f:minikube.k8s.io/version": {} + } + } + }, + "manager": "kubectl-label", + "operation": "Update", + "time": "2022-04-26T05:54:21Z" + }, { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:status": { + "f:conditions": { + "k:{\"type\":\"DiskPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"MemoryPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"PIDPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"Ready\"}": { + "f:lastHeartbeatTime": {}, + "f:lastTransitionTime": {}, + "f:message": {}, + "f:reason": {}, + "f:status": {} + } + } + } + }, + "manager": "Go-http-client", + "operation": "Update", + "subresource": "status", + "time": "2022-04-26T05:54:31Z" + }, { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:annotations": { + "f:node.alpha.kubernetes.io/ttl": {} + } + }, + "f:spec": { + "f:podCIDR": {}, + "f:podCIDRs": { + ".": {}, + "v:\"10.244.0.0/24\"": {} + } + } + }, + "manager": "kube-controller-manager", + "operation": "Update", + "time": "2022-04-26T05:54:33Z" + }], + "name": "minikube", + "resourceVersion": "4245", + "uid": "5a3a25d4-b1e5-42d3-a533-4d36f084314e" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": ["10.244.0.0/24"] + }, + "status": { + "addresses": [{ + "address": "192.168.49.2", + "type": "InternalIP" + }, { + "address": "minikube", + "type": "Hostname" + }], + "allocatable": { + "cpu": "4", + "ephemeral-storage": "94850516Ki", + "hugepages-2Mi": "0", + "memory": "10432976Ki", + "pods": "110" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "94850516Ki", + "hugepages-2Mi": "0", + "memory": "10432976Ki", + "pods": "110" + }, + "conditions": [{ + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:31Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + }], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [{ + "names": ["quay.io/armosec/k8s-ca-vuln-scan-ubi@sha256:275fa8a7a1e58cbd3c94bbf6c6a423970d6b44c5355021f2a7ca937563c26593", "quay.io/armosec/k8s-ca-vuln-scan-ubi:127"], + 
"sizeBytes": 1018599142 + }, { + "names": ["gcr.io/google-samples/node-hello@sha256:d238d0ab54efb76ec0f7b1da666cefa9b40be59ef34346a761b8adc2dd45459b", "gcr.io/google-samples/node-hello:1.0"], + "sizeBytes": 643762709 + }, { + "names": ["requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"], + "sizeBytes": 441083858 + }, { + "names": ["mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"], + "sizeBytes": 400782682 + }, { + "names": ["k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263", "k8s.gcr.io/etcd:3.5.1-0"], + "sizeBytes": 292558922 + }, { + "names": ["kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e", "kubernetesui/dashboard:v2.3.1"], + "sizeBytes": 220033604 + }, { + "names": ["httpd@sha256:94cd479f4875e3e0fba620baf7a0e9353e15783368f4f74b9ea5bdc729b3f366", "httpd:2.4"], + "sizeBytes": 143610390 + }, { + "names": ["quay.io/armosec/k8s-ca-dashboard-aggregator-ubi@sha256:5dd4c701070c0168dda6bf4932f2752212a6b8f9d70c0fa15f10f29d82ed460a", "quay.io/armosec/k8s-ca-dashboard-aggregator-ubi:185"], + "sizeBytes": 138395979 + }, { + "names": ["k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255", "k8s.gcr.io/kube-apiserver:v1.23.1"], + "sizeBytes": 135162256 + }, { + "names": ["k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604", "k8s.gcr.io/kube-controller-manager:v1.23.1"], + "sizeBytes": 124971684 + }, { + "names": ["k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8", "k8s.gcr.io/kube-proxy:v1.23.1"], + "sizeBytes": 112327826 + }, { + "names": ["quay.io/armosec/notification-server-ubi@sha256:4fc284ba63683e00468b92db20f51c1209ae475a6d0bd53c1b025964876d0eea", "quay.io/armosec/notification-server-ubi:89"], + "sizeBytes": 109413165 + }, { + "names": ["nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"], + "sizeBytes": 109129446 + }, { + "names": ["quay.io/armosec/kubescape@sha256:b76503638466be6a9b988890202fa00de0e8806819a4a4438328e50abdac270c", "quay.io/armosec/kubescape:v2.0.149"], + "sizeBytes": 55122796 + }, { + "names": ["k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c", "k8s.gcr.io/kube-scheduler:v1.23.1"], + "sizeBytes": 53488305 + }, { + "names": ["k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e", "k8s.gcr.io/coredns/coredns:v1.8.6"], + "sizeBytes": 46829283 + }, { + "names": ["quay.io/armosec/k8s-ca-websocket-ubi@sha256:a5eba54aeada7d995f83356dcabb6c505e3922016d29246fa0e8a3c179533861", "quay.io/armosec/k8s-ca-websocket-ubi:458"], + "sizeBytes": 45050289 + }, { + "names": ["kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172", "kubernetesui/metrics-scraper:v1.0.7"], + "sizeBytes": 34446077 + }, { + "names": ["gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", "gcr.io/k8s-minikube/storage-provisioner:v5"], + "sizeBytes": 31465472 + }, { + "names": ["quay.io/armosec/kube-host-sensor@sha256:b592a099c72c5f7ccc9da011b9c9f3297e7a60f5910a20f994c9dfa6142d9204"], + "sizeBytes": 11807596 + }, { + "names": ["quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee", "quay.io/armosec/kube-host-sensor:latest"], + "sizeBytes": 
11796788 + }, { + "names": ["busybox@sha256:caa382c432891547782ce7140fb3b7304613d3b0438834dce1cad68896ab110a", "busybox:latest"], + "sizeBytes": 1239748 + }, { + "names": ["k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db", "k8s.gcr.io/pause:3.6"], + "sizeBytes": 682696 + }], + "nodeInfo": { + "architecture": "amd64", + "bootID": "a025a04b-23a2-44b6-aa3a-2b3d3650bcbb", + "containerRuntimeVersion": "docker://20.10.12", + "kernelVersion": "5.4.0-1059-azure", + "kubeProxyVersion": "v1.23.1", + "kubeletVersion": "v1.23.1", + "machineID": "8de776e053e140d6a14c2d2def3d6bb8", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.2 LTS", + "systemUUID": "8d013ac0-0dbc-4c34-b2bd-0365fd0fd31c" + } + } +} \ No newline at end of file diff --git a/rules/CVE-2022-0185/test/test_azure_pass/expected.json b/rules/CVE-2022-0185/test/test_azure_pass/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/CVE-2022-0185/test/test_azure_pass/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/CVE-2022-0185/test/test_azure_pass/input/kernelvars.json b/rules/CVE-2022-0185/test/test_azure_pass/input/kernelvars.json new file mode 100644 index 000000000..4f45ec75e --- /dev/null +++ b/rules/CVE-2022-0185/test/test_azure_pass/input/kernelvars.json @@ -0,0 +1,536 @@ +{ + "apiVersion": "hostdata.kubescape.cloud/v1beta0", + "data": [{ + "key": "acct", + "source": "/proc/sys/kernel/acct", + "value": "4\t2\t30\n" + }, { + "key": "acpi_video_flags", + "source": "/proc/sys/kernel/acpi_video_flags", + "value": "0\n" + }, { + "key": "apparmor_display_secid_mode", + "source": "/proc/sys/kernel/apparmor_display_secid_mode", + "value": "0\n" + }, { + "key": "auto_msgmni", + "source": "/proc/sys/kernel/auto_msgmni", + "value": "0\n" + }, { + "key": "bootloader_type", + "source": "/proc/sys/kernel/bootloader_type", + "value": "114\n" + }, { + "key": "bootloader_version", + "source": "/proc/sys/kernel/bootloader_version", + "value": "2\n" + }, { + "key": "bpf_stats_enabled", + "source": "/proc/sys/kernel/bpf_stats_enabled", + "value": "0\n" + }, { + "key": "cad_pid", + "source": "/proc/sys/kernel/cad_pid", + "value": "0\n" + }, { + "key": "cap_last_cap", + "source": "/proc/sys/kernel/cap_last_cap", + "value": "40\n" + }, { + "key": "core_pattern", + "source": "/proc/sys/kernel/core_pattern", + "value": "|/usr/share/apport/apport %p %s %c %d %P %E\n" + }, { + "key": "core_pipe_limit", + "source": "/proc/sys/kernel/core_pipe_limit", + "value": "0\n" + }, { + "key": "core_uses_pid", + "source": "/proc/sys/kernel/core_uses_pid", + "value": "0\n" + }, { + "key": "ctrl-alt-del", + "source": "/proc/sys/kernel/ctrl-alt-del", + "value": "0\n" + }, { + "key": "dmesg_restrict", + "source": "/proc/sys/kernel/dmesg_restrict", + "value": "0\n" + }, { + "key": "domainname", + "source": "/proc/sys/kernel/domainname", + "value": "(none)\n" + }, { + "key": "force_sysfs_fallback", + "source": "/proc/sys/kernel/firmware_config/force_sysfs_fallback", + "value": "0\n" + }, { + "key": "ignore_sysfs_fallback", + "source": "/proc/sys/kernel/firmware_config/ignore_sysfs_fallback", + "value": "0\n" + }, { + "key": "ftrace_dump_on_oops", + "source": "/proc/sys/kernel/ftrace_dump_on_oops", + "value": "0\n" + }, { + "key": "ftrace_enabled", + "source": "/proc/sys/kernel/ftrace_enabled", + "value": "1\n" + }, { + "key": "hardlockup_all_cpu_backtrace", + "source": "/proc/sys/kernel/hardlockup_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "hardlockup_panic", 
+ "source": "/proc/sys/kernel/hardlockup_panic", + "value": "0\n" + }, { + "key": "hostname", + "source": "/proc/sys/kernel/hostname", + "value": "minikube\n" + }, { + "key": "hotplug", + "source": "/proc/sys/kernel/hotplug", + "value": "\n" + }, { + "key": "hung_task_all_cpu_backtrace", + "source": "/proc/sys/kernel/hung_task_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "hung_task_check_count", + "source": "/proc/sys/kernel/hung_task_check_count", + "value": "4194304\n" + }, { + "key": "hung_task_check_interval_secs", + "source": "/proc/sys/kernel/hung_task_check_interval_secs", + "value": "0\n" + }, { + "key": "hung_task_panic", + "source": "/proc/sys/kernel/hung_task_panic", + "value": "0\n" + }, { + "key": "hung_task_timeout_secs", + "source": "/proc/sys/kernel/hung_task_timeout_secs", + "value": "120\n" + }, { + "key": "hung_task_warnings", + "source": "/proc/sys/kernel/hung_task_warnings", + "value": "10\n" + }, { + "key": "io_delay_type", + "source": "/proc/sys/kernel/io_delay_type", + "value": "1\n" + }, { + "key": "kexec_load_disabled", + "source": "/proc/sys/kernel/kexec_load_disabled", + "value": "0\n" + }, { + "key": "gc_delay", + "source": "/proc/sys/kernel/keys/gc_delay", + "value": "300\n" + }, { + "key": "maxbytes", + "source": "/proc/sys/kernel/keys/maxbytes", + "value": "20000\n" + }, { + "key": "maxkeys", + "source": "/proc/sys/kernel/keys/maxkeys", + "value": "200\n" + }, { + "key": "persistent_keyring_expiry", + "source": "/proc/sys/kernel/keys/persistent_keyring_expiry", + "value": "259200\n" + }, { + "key": "root_maxbytes", + "source": "/proc/sys/kernel/keys/root_maxbytes", + "value": "25000000\n" + }, { + "key": "root_maxkeys", + "source": "/proc/sys/kernel/keys/root_maxkeys", + "value": "1000000\n" + }, { + "key": "kptr_restrict", + "source": "/proc/sys/kernel/kptr_restrict", + "value": "1\n" + }, { + "key": "max_lock_depth", + "source": "/proc/sys/kernel/max_lock_depth", + "value": "1024\n" + }, { + "key": "max_rcu_stall_to_panic", + "source": "/proc/sys/kernel/max_rcu_stall_to_panic", + "value": "0\n" + }, { + "key": "modprobe", + "source": "/proc/sys/kernel/modprobe", + "value": "/sbin/modprobe\n" + }, { + "key": "modules_disabled", + "source": "/proc/sys/kernel/modules_disabled", + "value": "0\n" + }, { + "key": "msg_next_id", + "source": "/proc/sys/kernel/msg_next_id", + "value": "-1\n" + }, { + "key": "msgmax", + "source": "/proc/sys/kernel/msgmax", + "value": "8192\n" + }, { + "key": "msgmnb", + "source": "/proc/sys/kernel/msgmnb", + "value": "16384\n" + }, { + "key": "msgmni", + "source": "/proc/sys/kernel/msgmni", + "value": "32000\n" + }, { + "key": "ngroups_max", + "source": "/proc/sys/kernel/ngroups_max", + "value": "65536\n" + }, { + "key": "nmi_watchdog", + "source": "/proc/sys/kernel/nmi_watchdog", + "value": "0\n" + }, { + "key": "ns_last_pid", + "source": "/proc/sys/kernel/ns_last_pid", + "value": "17618\n" + }, { + "key": "numa_balancing", + "source": "/proc/sys/kernel/numa_balancing", + "value": "0\n" + }, { + "key": "oops_all_cpu_backtrace", + "source": "/proc/sys/kernel/oops_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "osrelease", + "source": "/proc/sys/kernel/osrelease", + "value": "5.13.0-39-generic\n" + }, { + "key": "ostype", + "source": "/proc/sys/kernel/ostype", + "value": "Linux\n" + }, { + "key": "overflowgid", + "source": "/proc/sys/kernel/overflowgid", + "value": "65534\n" + }, { + "key": "overflowuid", + "source": "/proc/sys/kernel/overflowuid", + "value": "65534\n" + }, { + "key": "panic", + "source": 
"/proc/sys/kernel/panic", + "value": "10\n" + }, { + "key": "panic_on_io_nmi", + "source": "/proc/sys/kernel/panic_on_io_nmi", + "value": "0\n" + }, { + "key": "panic_on_oops", + "source": "/proc/sys/kernel/panic_on_oops", + "value": "1\n" + }, { + "key": "panic_on_rcu_stall", + "source": "/proc/sys/kernel/panic_on_rcu_stall", + "value": "0\n" + }, { + "key": "panic_on_unrecovered_nmi", + "source": "/proc/sys/kernel/panic_on_unrecovered_nmi", + "value": "0\n" + }, { + "key": "panic_on_warn", + "source": "/proc/sys/kernel/panic_on_warn", + "value": "0\n" + }, { + "key": "panic_print", + "source": "/proc/sys/kernel/panic_print", + "value": "0\n" + }, { + "key": "perf_cpu_time_max_percent", + "source": "/proc/sys/kernel/perf_cpu_time_max_percent", + "value": "25\n" + }, { + "key": "perf_event_max_contexts_per_stack", + "source": "/proc/sys/kernel/perf_event_max_contexts_per_stack", + "value": "8\n" + }, { + "key": "perf_event_max_sample_rate", + "source": "/proc/sys/kernel/perf_event_max_sample_rate", + "value": "100000\n" + }, { + "key": "perf_event_max_stack", + "source": "/proc/sys/kernel/perf_event_max_stack", + "value": "127\n" + }, { + "key": "perf_event_mlock_kb", + "source": "/proc/sys/kernel/perf_event_mlock_kb", + "value": "516\n" + }, { + "key": "perf_event_paranoid", + "source": "/proc/sys/kernel/perf_event_paranoid", + "value": "4\n" + }, { + "key": "pid_max", + "source": "/proc/sys/kernel/pid_max", + "value": "4194304\n" + }, { + "key": "poweroff_cmd", + "source": "/proc/sys/kernel/poweroff_cmd", + "value": "/sbin/poweroff\n" + }, { + "key": "print-fatal-signals", + "source": "/proc/sys/kernel/print-fatal-signals", + "value": "0\n" + }, { + "key": "printk", + "source": "/proc/sys/kernel/printk", + "value": "4\t4\t1\t7\n" + }, { + "key": "printk_delay", + "source": "/proc/sys/kernel/printk_delay", + "value": "0\n" + }, { + "key": "printk_devkmsg", + "source": "/proc/sys/kernel/printk_devkmsg", + "value": "on\n" + }, { + "key": "printk_ratelimit", + "source": "/proc/sys/kernel/printk_ratelimit", + "value": "5\n" + }, { + "key": "printk_ratelimit_burst", + "source": "/proc/sys/kernel/printk_ratelimit_burst", + "value": "10\n" + }, { + "key": "max", + "source": "/proc/sys/kernel/pty/max", + "value": "4096\n" + }, { + "key": "nr", + "source": "/proc/sys/kernel/pty/nr", + "value": "4\n" + }, { + "key": "reserve", + "source": "/proc/sys/kernel/pty/reserve", + "value": "1024\n" + }, { + "key": "boot_id", + "source": "/proc/sys/kernel/random/boot_id", + "value": "a025a04b-23a2-44b6-aa3a-2b3d3650bcbb\n" + }, { + "key": "entropy_avail", + "source": "/proc/sys/kernel/random/entropy_avail", + "value": "3806\n" + }, { + "key": "poolsize", + "source": "/proc/sys/kernel/random/poolsize", + "value": "4096\n" + }, { + "key": "urandom_min_reseed_secs", + "source": "/proc/sys/kernel/random/urandom_min_reseed_secs", + "value": "60\n" + }, { + "key": "uuid", + "source": "/proc/sys/kernel/random/uuid", + "value": "7b6b5bf9-9af4-49db-aba6-f0be1c57e2b8\n" + }, { + "key": "write_wakeup_threshold", + "source": "/proc/sys/kernel/random/write_wakeup_threshold", + "value": "896\n" + }, { + "key": "randomize_va_space", + "source": "/proc/sys/kernel/randomize_va_space", + "value": "2\n" + }, { + "key": "real-root-dev", + "source": "/proc/sys/kernel/real-root-dev", + "value": "0\n" + }, { + "key": "sched_autogroup_enabled", + "source": "/proc/sys/kernel/sched_autogroup_enabled", + "value": "1\n" + }, { + "key": "sched_cfs_bandwidth_slice_us", + "source": "/proc/sys/kernel/sched_cfs_bandwidth_slice_us", + 
"value": "5000\n" + }, { + "key": "sched_child_runs_first", + "source": "/proc/sys/kernel/sched_child_runs_first", + "value": "0\n" + }, { + "key": "sched_deadline_period_max_us", + "source": "/proc/sys/kernel/sched_deadline_period_max_us", + "value": "4194304\n" + }, { + "key": "sched_deadline_period_min_us", + "source": "/proc/sys/kernel/sched_deadline_period_min_us", + "value": "100\n" + }, { + "key": "sched_energy_aware", + "source": "/proc/sys/kernel/sched_energy_aware", + "value": "1\n" + }, { + "key": "sched_rr_timeslice_ms", + "source": "/proc/sys/kernel/sched_rr_timeslice_ms", + "value": "100\n" + }, { + "key": "sched_rt_period_us", + "source": "/proc/sys/kernel/sched_rt_period_us", + "value": "1000000\n" + }, { + "key": "sched_rt_runtime_us", + "source": "/proc/sys/kernel/sched_rt_runtime_us", + "value": "950000\n" + }, { + "key": "sched_schedstats", + "source": "/proc/sys/kernel/sched_schedstats", + "value": "0\n" + }, { + "key": "sched_util_clamp_max", + "source": "/proc/sys/kernel/sched_util_clamp_max", + "value": "1024\n" + }, { + "key": "sched_util_clamp_min", + "source": "/proc/sys/kernel/sched_util_clamp_min", + "value": "1024\n" + }, { + "key": "sched_util_clamp_min_rt_default", + "source": "/proc/sys/kernel/sched_util_clamp_min_rt_default", + "value": "1024\n" + }, { + "key": "actions_avail", + "source": "/proc/sys/kernel/seccomp/actions_avail", + "value": "kill_process kill_thread trap errno user_notif trace log allow\n" + }, { + "key": "actions_logged", + "source": "/proc/sys/kernel/seccomp/actions_logged", + "value": "kill_process kill_thread trap errno user_notif trace log\n" + }, { + "key": "sem", + "source": "/proc/sys/kernel/sem", + "value": "32000\t1024000000\t500\t32000\n" + }, { + "key": "sem_next_id", + "source": "/proc/sys/kernel/sem_next_id", + "value": "-1\n" + }, { + "key": "sg-big-buff", + "source": "/proc/sys/kernel/sg-big-buff", + "value": "32768\n" + }, { + "key": "shm_next_id", + "source": "/proc/sys/kernel/shm_next_id", + "value": "-1\n" + }, { + "key": "shm_rmid_forced", + "source": "/proc/sys/kernel/shm_rmid_forced", + "value": "0\n" + }, { + "key": "shmall", + "source": "/proc/sys/kernel/shmall", + "value": "18446744073692774399\n" + }, { + "key": "shmmax", + "source": "/proc/sys/kernel/shmmax", + "value": "18446744073692774399\n" + }, { + "key": "shmmni", + "source": "/proc/sys/kernel/shmmni", + "value": "4096\n" + }, { + "key": "soft_watchdog", + "source": "/proc/sys/kernel/soft_watchdog", + "value": "1\n" + }, { + "key": "softlockup_all_cpu_backtrace", + "source": "/proc/sys/kernel/softlockup_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "softlockup_panic", + "source": "/proc/sys/kernel/softlockup_panic", + "value": "0\n" + }, { + "key": "stack_tracer_enabled", + "source": "/proc/sys/kernel/stack_tracer_enabled", + "value": "0\n" + }, { + "key": "sysctl_writes_strict", + "source": "/proc/sys/kernel/sysctl_writes_strict", + "value": "1\n" + }, { + "key": "sysrq", + "source": "/proc/sys/kernel/sysrq", + "value": "176\n" + }, { + "key": "tainted", + "source": "/proc/sys/kernel/tainted", + "value": "12288\n" + }, { + "key": "threads-max", + "source": "/proc/sys/kernel/threads-max", + "value": "80984\n" + }, { + "key": "timer_migration", + "source": "/proc/sys/kernel/timer_migration", + "value": "1\n" + }, { + "key": "traceoff_on_warning", + "source": "/proc/sys/kernel/traceoff_on_warning", + "value": "0\n" + }, { + "key": "tracepoint_printk", + "source": "/proc/sys/kernel/tracepoint_printk", + "value": "0\n" + }, { + "key": 
"unknown_nmi_panic", + "source": "/proc/sys/kernel/unknown_nmi_panic", + "value": "0\n" + }, { + "key": "unprivileged_bpf_disabled", + "source": "/proc/sys/kernel/unprivileged_bpf_disabled", + "value": "2\n" + }, { + "key": "unprivileged_userns_apparmor_policy", + "source": "/proc/sys/kernel/unprivileged_userns_apparmor_policy", + "value": "1\n" + }, { + "key": "unprivileged_userns_clone", + "source": "/proc/sys/kernel/unprivileged_userns_clone", + "value": "1\n" + }, { + "key": "bset", + "source": "/proc/sys/kernel/usermodehelper/bset", + "value": "4294967295\t511\n" + }, { + "key": "inheritable", + "source": "/proc/sys/kernel/usermodehelper/inheritable", + "value": "4294967295\t511\n" + }, { + "key": "version", + "source": "/proc/sys/kernel/version", + "value": "#44~20.04.1-Ubuntu SMP Thu Mar 24 16:43:35 UTC 2022\n" + }, { + "key": "watchdog", + "source": "/proc/sys/kernel/watchdog", + "value": "1\n" + }, { + "key": "watchdog_cpumask", + "source": "/proc/sys/kernel/watchdog_cpumask", + "value": "0-3\n" + }, { + "key": "watchdog_thresh", + "source": "/proc/sys/kernel/watchdog_thresh", + "value": "10\n" + }, { + "key": "ptrace_scope", + "source": "/proc/sys/kernel/yama/ptrace_scope", + "value": "1\n" + }], + "kind": "LinuxKernelVariables", + "metadata": { + "name": "minikube" + } +} \ No newline at end of file diff --git a/rules/CVE-2022-0185/test/test_azure_pass/input/node.json b/rules/CVE-2022-0185/test/test_azure_pass/input/node.json new file mode 100644 index 000000000..024b35095 --- /dev/null +++ b/rules/CVE-2022-0185/test/test_azure_pass/input/node.json @@ -0,0 +1,264 @@ +{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-04-26T05:54:17Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube", + "kubernetes.io/os": "linux", + "minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d", + "minikube.k8s.io/name": "minikube", + "minikube.k8s.io/updated_at": "2022_04_26T08_54_20_0700", + "minikube.k8s.io/version": "v1.25.1", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "managedFields": [{ + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:annotations": { + ".": {}, + "f:kubeadm.alpha.kubernetes.io/cri-socket": {}, + "f:volumes.kubernetes.io/controller-managed-attach-detach": {} + }, + "f:labels": { + ".": {}, + "f:beta.kubernetes.io/arch": {}, + "f:beta.kubernetes.io/os": {}, + "f:kubernetes.io/arch": {}, + "f:kubernetes.io/hostname": {}, + "f:kubernetes.io/os": {}, + "f:node-role.kubernetes.io/control-plane": {}, + "f:node-role.kubernetes.io/master": {}, + "f:node.kubernetes.io/exclude-from-external-load-balancers": {} + } + } + }, + "manager": "Go-http-client", + "operation": "Update", + "time": "2022-04-26T05:54:20Z" + }, { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:labels": { + "f:minikube.k8s.io/commit": {}, + "f:minikube.k8s.io/name": {}, + "f:minikube.k8s.io/updated_at": {}, + "f:minikube.k8s.io/version": {} + } + } + }, + "manager": "kubectl-label", + "operation": "Update", + "time": "2022-04-26T05:54:21Z" + }, { + "apiVersion": "v1", + 
"fieldsType": "FieldsV1", + "fieldsV1": { + "f:status": { + "f:conditions": { + "k:{\"type\":\"DiskPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"MemoryPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"PIDPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"Ready\"}": { + "f:lastHeartbeatTime": {}, + "f:lastTransitionTime": {}, + "f:message": {}, + "f:reason": {}, + "f:status": {} + } + } + } + }, + "manager": "Go-http-client", + "operation": "Update", + "subresource": "status", + "time": "2022-04-26T05:54:31Z" + }, { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:annotations": { + "f:node.alpha.kubernetes.io/ttl": {} + } + }, + "f:spec": { + "f:podCIDR": {}, + "f:podCIDRs": { + ".": {}, + "v:\"10.244.0.0/24\"": {} + } + } + }, + "manager": "kube-controller-manager", + "operation": "Update", + "time": "2022-04-26T05:54:33Z" + }], + "name": "minikube", + "resourceVersion": "4245", + "uid": "5a3a25d4-b1e5-42d3-a533-4d36f084314e" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": ["10.244.0.0/24"] + }, + "status": { + "addresses": [{ + "address": "192.168.49.2", + "type": "InternalIP" + }, { + "address": "minikube", + "type": "Hostname" + }], + "allocatable": { + "cpu": "4", + "ephemeral-storage": "94850516Ki", + "hugepages-2Mi": "0", + "memory": "10432976Ki", + "pods": "110" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "94850516Ki", + "hugepages-2Mi": "0", + "memory": "10432976Ki", + "pods": "110" + }, + "conditions": [{ + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:31Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + }], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [{ + "names": ["quay.io/armosec/k8s-ca-vuln-scan-ubi@sha256:275fa8a7a1e58cbd3c94bbf6c6a423970d6b44c5355021f2a7ca937563c26593", "quay.io/armosec/k8s-ca-vuln-scan-ubi:127"], + "sizeBytes": 1018599142 + }, { + "names": ["gcr.io/google-samples/node-hello@sha256:d238d0ab54efb76ec0f7b1da666cefa9b40be59ef34346a761b8adc2dd45459b", "gcr.io/google-samples/node-hello:1.0"], + "sizeBytes": 643762709 + }, { + "names": ["requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"], + "sizeBytes": 441083858 + }, { + "names": ["mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"], + "sizeBytes": 400782682 + }, { + "names": ["k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263", "k8s.gcr.io/etcd:3.5.1-0"], + "sizeBytes": 292558922 + }, { + "names": ["kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e", "kubernetesui/dashboard:v2.3.1"], + 
"sizeBytes": 220033604 + }, { + "names": ["httpd@sha256:94cd479f4875e3e0fba620baf7a0e9353e15783368f4f74b9ea5bdc729b3f366", "httpd:2.4"], + "sizeBytes": 143610390 + }, { + "names": ["quay.io/armosec/k8s-ca-dashboard-aggregator-ubi@sha256:5dd4c701070c0168dda6bf4932f2752212a6b8f9d70c0fa15f10f29d82ed460a", "quay.io/armosec/k8s-ca-dashboard-aggregator-ubi:185"], + "sizeBytes": 138395979 + }, { + "names": ["k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255", "k8s.gcr.io/kube-apiserver:v1.23.1"], + "sizeBytes": 135162256 + }, { + "names": ["k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604", "k8s.gcr.io/kube-controller-manager:v1.23.1"], + "sizeBytes": 124971684 + }, { + "names": ["k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8", "k8s.gcr.io/kube-proxy:v1.23.1"], + "sizeBytes": 112327826 + }, { + "names": ["quay.io/armosec/notification-server-ubi@sha256:4fc284ba63683e00468b92db20f51c1209ae475a6d0bd53c1b025964876d0eea", "quay.io/armosec/notification-server-ubi:89"], + "sizeBytes": 109413165 + }, { + "names": ["nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"], + "sizeBytes": 109129446 + }, { + "names": ["quay.io/armosec/kubescape@sha256:b76503638466be6a9b988890202fa00de0e8806819a4a4438328e50abdac270c", "quay.io/armosec/kubescape:v2.0.149"], + "sizeBytes": 55122796 + }, { + "names": ["k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c", "k8s.gcr.io/kube-scheduler:v1.23.1"], + "sizeBytes": 53488305 + }, { + "names": ["k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e", "k8s.gcr.io/coredns/coredns:v1.8.6"], + "sizeBytes": 46829283 + }, { + "names": ["quay.io/armosec/k8s-ca-websocket-ubi@sha256:a5eba54aeada7d995f83356dcabb6c505e3922016d29246fa0e8a3c179533861", "quay.io/armosec/k8s-ca-websocket-ubi:458"], + "sizeBytes": 45050289 + }, { + "names": ["kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172", "kubernetesui/metrics-scraper:v1.0.7"], + "sizeBytes": 34446077 + }, { + "names": ["gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", "gcr.io/k8s-minikube/storage-provisioner:v5"], + "sizeBytes": 31465472 + }, { + "names": ["quay.io/armosec/kube-host-sensor@sha256:b592a099c72c5f7ccc9da011b9c9f3297e7a60f5910a20f994c9dfa6142d9204"], + "sizeBytes": 11807596 + }, { + "names": ["quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee", "quay.io/armosec/kube-host-sensor:latest"], + "sizeBytes": 11796788 + }, { + "names": ["busybox@sha256:caa382c432891547782ce7140fb3b7304613d3b0438834dce1cad68896ab110a", "busybox:latest"], + "sizeBytes": 1239748 + }, { + "names": ["k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db", "k8s.gcr.io/pause:3.6"], + "sizeBytes": 682696 + }], + "nodeInfo": { + "architecture": "amd64", + "bootID": "a025a04b-23a2-44b6-aa3a-2b3d3650bcbb", + "containerRuntimeVersion": "docker://20.10.12", + "kernelVersion": "5.4.0-1067-azure", + "kubeProxyVersion": "v1.23.1", + "kubeletVersion": "v1.23.1", + "machineID": "8de776e053e140d6a14c2d2def3d6bb8", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.2 LTS", + "systemUUID": "8d013ac0-0dbc-4c34-b2bd-0365fd0fd31c" + } + } +} \ No newline at end of file diff --git 
a/rules/CVE-2022-0185/test/test/expected.json b/rules/CVE-2022-0185/test/test_generic_fail/expected.json similarity index 100% rename from rules/CVE-2022-0185/test/test/expected.json rename to rules/CVE-2022-0185/test/test_generic_fail/expected.json diff --git a/rules/CVE-2022-0185/test/test_generic_fail/input/kernelvars.json b/rules/CVE-2022-0185/test/test_generic_fail/input/kernelvars.json new file mode 100644 index 000000000..4f45ec75e --- /dev/null +++ b/rules/CVE-2022-0185/test/test_generic_fail/input/kernelvars.json @@ -0,0 +1,536 @@ +{ + "apiVersion": "hostdata.kubescape.cloud/v1beta0", + "data": [{ + "key": "acct", + "source": "/proc/sys/kernel/acct", + "value": "4\t2\t30\n" + }, { + "key": "acpi_video_flags", + "source": "/proc/sys/kernel/acpi_video_flags", + "value": "0\n" + }, { + "key": "apparmor_display_secid_mode", + "source": "/proc/sys/kernel/apparmor_display_secid_mode", + "value": "0\n" + }, { + "key": "auto_msgmni", + "source": "/proc/sys/kernel/auto_msgmni", + "value": "0\n" + }, { + "key": "bootloader_type", + "source": "/proc/sys/kernel/bootloader_type", + "value": "114\n" + }, { + "key": "bootloader_version", + "source": "/proc/sys/kernel/bootloader_version", + "value": "2\n" + }, { + "key": "bpf_stats_enabled", + "source": "/proc/sys/kernel/bpf_stats_enabled", + "value": "0\n" + }, { + "key": "cad_pid", + "source": "/proc/sys/kernel/cad_pid", + "value": "0\n" + }, { + "key": "cap_last_cap", + "source": "/proc/sys/kernel/cap_last_cap", + "value": "40\n" + }, { + "key": "core_pattern", + "source": "/proc/sys/kernel/core_pattern", + "value": "|/usr/share/apport/apport %p %s %c %d %P %E\n" + }, { + "key": "core_pipe_limit", + "source": "/proc/sys/kernel/core_pipe_limit", + "value": "0\n" + }, { + "key": "core_uses_pid", + "source": "/proc/sys/kernel/core_uses_pid", + "value": "0\n" + }, { + "key": "ctrl-alt-del", + "source": "/proc/sys/kernel/ctrl-alt-del", + "value": "0\n" + }, { + "key": "dmesg_restrict", + "source": "/proc/sys/kernel/dmesg_restrict", + "value": "0\n" + }, { + "key": "domainname", + "source": "/proc/sys/kernel/domainname", + "value": "(none)\n" + }, { + "key": "force_sysfs_fallback", + "source": "/proc/sys/kernel/firmware_config/force_sysfs_fallback", + "value": "0\n" + }, { + "key": "ignore_sysfs_fallback", + "source": "/proc/sys/kernel/firmware_config/ignore_sysfs_fallback", + "value": "0\n" + }, { + "key": "ftrace_dump_on_oops", + "source": "/proc/sys/kernel/ftrace_dump_on_oops", + "value": "0\n" + }, { + "key": "ftrace_enabled", + "source": "/proc/sys/kernel/ftrace_enabled", + "value": "1\n" + }, { + "key": "hardlockup_all_cpu_backtrace", + "source": "/proc/sys/kernel/hardlockup_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "hardlockup_panic", + "source": "/proc/sys/kernel/hardlockup_panic", + "value": "0\n" + }, { + "key": "hostname", + "source": "/proc/sys/kernel/hostname", + "value": "minikube\n" + }, { + "key": "hotplug", + "source": "/proc/sys/kernel/hotplug", + "value": "\n" + }, { + "key": "hung_task_all_cpu_backtrace", + "source": "/proc/sys/kernel/hung_task_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "hung_task_check_count", + "source": "/proc/sys/kernel/hung_task_check_count", + "value": "4194304\n" + }, { + "key": "hung_task_check_interval_secs", + "source": "/proc/sys/kernel/hung_task_check_interval_secs", + "value": "0\n" + }, { + "key": "hung_task_panic", + "source": "/proc/sys/kernel/hung_task_panic", + "value": "0\n" + }, { + "key": "hung_task_timeout_secs", + "source": "/proc/sys/kernel/hung_task_timeout_secs", + 
"value": "120\n" + }, { + "key": "hung_task_warnings", + "source": "/proc/sys/kernel/hung_task_warnings", + "value": "10\n" + }, { + "key": "io_delay_type", + "source": "/proc/sys/kernel/io_delay_type", + "value": "1\n" + }, { + "key": "kexec_load_disabled", + "source": "/proc/sys/kernel/kexec_load_disabled", + "value": "0\n" + }, { + "key": "gc_delay", + "source": "/proc/sys/kernel/keys/gc_delay", + "value": "300\n" + }, { + "key": "maxbytes", + "source": "/proc/sys/kernel/keys/maxbytes", + "value": "20000\n" + }, { + "key": "maxkeys", + "source": "/proc/sys/kernel/keys/maxkeys", + "value": "200\n" + }, { + "key": "persistent_keyring_expiry", + "source": "/proc/sys/kernel/keys/persistent_keyring_expiry", + "value": "259200\n" + }, { + "key": "root_maxbytes", + "source": "/proc/sys/kernel/keys/root_maxbytes", + "value": "25000000\n" + }, { + "key": "root_maxkeys", + "source": "/proc/sys/kernel/keys/root_maxkeys", + "value": "1000000\n" + }, { + "key": "kptr_restrict", + "source": "/proc/sys/kernel/kptr_restrict", + "value": "1\n" + }, { + "key": "max_lock_depth", + "source": "/proc/sys/kernel/max_lock_depth", + "value": "1024\n" + }, { + "key": "max_rcu_stall_to_panic", + "source": "/proc/sys/kernel/max_rcu_stall_to_panic", + "value": "0\n" + }, { + "key": "modprobe", + "source": "/proc/sys/kernel/modprobe", + "value": "/sbin/modprobe\n" + }, { + "key": "modules_disabled", + "source": "/proc/sys/kernel/modules_disabled", + "value": "0\n" + }, { + "key": "msg_next_id", + "source": "/proc/sys/kernel/msg_next_id", + "value": "-1\n" + }, { + "key": "msgmax", + "source": "/proc/sys/kernel/msgmax", + "value": "8192\n" + }, { + "key": "msgmnb", + "source": "/proc/sys/kernel/msgmnb", + "value": "16384\n" + }, { + "key": "msgmni", + "source": "/proc/sys/kernel/msgmni", + "value": "32000\n" + }, { + "key": "ngroups_max", + "source": "/proc/sys/kernel/ngroups_max", + "value": "65536\n" + }, { + "key": "nmi_watchdog", + "source": "/proc/sys/kernel/nmi_watchdog", + "value": "0\n" + }, { + "key": "ns_last_pid", + "source": "/proc/sys/kernel/ns_last_pid", + "value": "17618\n" + }, { + "key": "numa_balancing", + "source": "/proc/sys/kernel/numa_balancing", + "value": "0\n" + }, { + "key": "oops_all_cpu_backtrace", + "source": "/proc/sys/kernel/oops_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "osrelease", + "source": "/proc/sys/kernel/osrelease", + "value": "5.13.0-39-generic\n" + }, { + "key": "ostype", + "source": "/proc/sys/kernel/ostype", + "value": "Linux\n" + }, { + "key": "overflowgid", + "source": "/proc/sys/kernel/overflowgid", + "value": "65534\n" + }, { + "key": "overflowuid", + "source": "/proc/sys/kernel/overflowuid", + "value": "65534\n" + }, { + "key": "panic", + "source": "/proc/sys/kernel/panic", + "value": "10\n" + }, { + "key": "panic_on_io_nmi", + "source": "/proc/sys/kernel/panic_on_io_nmi", + "value": "0\n" + }, { + "key": "panic_on_oops", + "source": "/proc/sys/kernel/panic_on_oops", + "value": "1\n" + }, { + "key": "panic_on_rcu_stall", + "source": "/proc/sys/kernel/panic_on_rcu_stall", + "value": "0\n" + }, { + "key": "panic_on_unrecovered_nmi", + "source": "/proc/sys/kernel/panic_on_unrecovered_nmi", + "value": "0\n" + }, { + "key": "panic_on_warn", + "source": "/proc/sys/kernel/panic_on_warn", + "value": "0\n" + }, { + "key": "panic_print", + "source": "/proc/sys/kernel/panic_print", + "value": "0\n" + }, { + "key": "perf_cpu_time_max_percent", + "source": "/proc/sys/kernel/perf_cpu_time_max_percent", + "value": "25\n" + }, { + "key": "perf_event_max_contexts_per_stack", 
+ "source": "/proc/sys/kernel/perf_event_max_contexts_per_stack", + "value": "8\n" + }, { + "key": "perf_event_max_sample_rate", + "source": "/proc/sys/kernel/perf_event_max_sample_rate", + "value": "100000\n" + }, { + "key": "perf_event_max_stack", + "source": "/proc/sys/kernel/perf_event_max_stack", + "value": "127\n" + }, { + "key": "perf_event_mlock_kb", + "source": "/proc/sys/kernel/perf_event_mlock_kb", + "value": "516\n" + }, { + "key": "perf_event_paranoid", + "source": "/proc/sys/kernel/perf_event_paranoid", + "value": "4\n" + }, { + "key": "pid_max", + "source": "/proc/sys/kernel/pid_max", + "value": "4194304\n" + }, { + "key": "poweroff_cmd", + "source": "/proc/sys/kernel/poweroff_cmd", + "value": "/sbin/poweroff\n" + }, { + "key": "print-fatal-signals", + "source": "/proc/sys/kernel/print-fatal-signals", + "value": "0\n" + }, { + "key": "printk", + "source": "/proc/sys/kernel/printk", + "value": "4\t4\t1\t7\n" + }, { + "key": "printk_delay", + "source": "/proc/sys/kernel/printk_delay", + "value": "0\n" + }, { + "key": "printk_devkmsg", + "source": "/proc/sys/kernel/printk_devkmsg", + "value": "on\n" + }, { + "key": "printk_ratelimit", + "source": "/proc/sys/kernel/printk_ratelimit", + "value": "5\n" + }, { + "key": "printk_ratelimit_burst", + "source": "/proc/sys/kernel/printk_ratelimit_burst", + "value": "10\n" + }, { + "key": "max", + "source": "/proc/sys/kernel/pty/max", + "value": "4096\n" + }, { + "key": "nr", + "source": "/proc/sys/kernel/pty/nr", + "value": "4\n" + }, { + "key": "reserve", + "source": "/proc/sys/kernel/pty/reserve", + "value": "1024\n" + }, { + "key": "boot_id", + "source": "/proc/sys/kernel/random/boot_id", + "value": "a025a04b-23a2-44b6-aa3a-2b3d3650bcbb\n" + }, { + "key": "entropy_avail", + "source": "/proc/sys/kernel/random/entropy_avail", + "value": "3806\n" + }, { + "key": "poolsize", + "source": "/proc/sys/kernel/random/poolsize", + "value": "4096\n" + }, { + "key": "urandom_min_reseed_secs", + "source": "/proc/sys/kernel/random/urandom_min_reseed_secs", + "value": "60\n" + }, { + "key": "uuid", + "source": "/proc/sys/kernel/random/uuid", + "value": "7b6b5bf9-9af4-49db-aba6-f0be1c57e2b8\n" + }, { + "key": "write_wakeup_threshold", + "source": "/proc/sys/kernel/random/write_wakeup_threshold", + "value": "896\n" + }, { + "key": "randomize_va_space", + "source": "/proc/sys/kernel/randomize_va_space", + "value": "2\n" + }, { + "key": "real-root-dev", + "source": "/proc/sys/kernel/real-root-dev", + "value": "0\n" + }, { + "key": "sched_autogroup_enabled", + "source": "/proc/sys/kernel/sched_autogroup_enabled", + "value": "1\n" + }, { + "key": "sched_cfs_bandwidth_slice_us", + "source": "/proc/sys/kernel/sched_cfs_bandwidth_slice_us", + "value": "5000\n" + }, { + "key": "sched_child_runs_first", + "source": "/proc/sys/kernel/sched_child_runs_first", + "value": "0\n" + }, { + "key": "sched_deadline_period_max_us", + "source": "/proc/sys/kernel/sched_deadline_period_max_us", + "value": "4194304\n" + }, { + "key": "sched_deadline_period_min_us", + "source": "/proc/sys/kernel/sched_deadline_period_min_us", + "value": "100\n" + }, { + "key": "sched_energy_aware", + "source": "/proc/sys/kernel/sched_energy_aware", + "value": "1\n" + }, { + "key": "sched_rr_timeslice_ms", + "source": "/proc/sys/kernel/sched_rr_timeslice_ms", + "value": "100\n" + }, { + "key": "sched_rt_period_us", + "source": "/proc/sys/kernel/sched_rt_period_us", + "value": "1000000\n" + }, { + "key": "sched_rt_runtime_us", + "source": "/proc/sys/kernel/sched_rt_runtime_us", + "value": 
"950000\n" + }, { + "key": "sched_schedstats", + "source": "/proc/sys/kernel/sched_schedstats", + "value": "0\n" + }, { + "key": "sched_util_clamp_max", + "source": "/proc/sys/kernel/sched_util_clamp_max", + "value": "1024\n" + }, { + "key": "sched_util_clamp_min", + "source": "/proc/sys/kernel/sched_util_clamp_min", + "value": "1024\n" + }, { + "key": "sched_util_clamp_min_rt_default", + "source": "/proc/sys/kernel/sched_util_clamp_min_rt_default", + "value": "1024\n" + }, { + "key": "actions_avail", + "source": "/proc/sys/kernel/seccomp/actions_avail", + "value": "kill_process kill_thread trap errno user_notif trace log allow\n" + }, { + "key": "actions_logged", + "source": "/proc/sys/kernel/seccomp/actions_logged", + "value": "kill_process kill_thread trap errno user_notif trace log\n" + }, { + "key": "sem", + "source": "/proc/sys/kernel/sem", + "value": "32000\t1024000000\t500\t32000\n" + }, { + "key": "sem_next_id", + "source": "/proc/sys/kernel/sem_next_id", + "value": "-1\n" + }, { + "key": "sg-big-buff", + "source": "/proc/sys/kernel/sg-big-buff", + "value": "32768\n" + }, { + "key": "shm_next_id", + "source": "/proc/sys/kernel/shm_next_id", + "value": "-1\n" + }, { + "key": "shm_rmid_forced", + "source": "/proc/sys/kernel/shm_rmid_forced", + "value": "0\n" + }, { + "key": "shmall", + "source": "/proc/sys/kernel/shmall", + "value": "18446744073692774399\n" + }, { + "key": "shmmax", + "source": "/proc/sys/kernel/shmmax", + "value": "18446744073692774399\n" + }, { + "key": "shmmni", + "source": "/proc/sys/kernel/shmmni", + "value": "4096\n" + }, { + "key": "soft_watchdog", + "source": "/proc/sys/kernel/soft_watchdog", + "value": "1\n" + }, { + "key": "softlockup_all_cpu_backtrace", + "source": "/proc/sys/kernel/softlockup_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "softlockup_panic", + "source": "/proc/sys/kernel/softlockup_panic", + "value": "0\n" + }, { + "key": "stack_tracer_enabled", + "source": "/proc/sys/kernel/stack_tracer_enabled", + "value": "0\n" + }, { + "key": "sysctl_writes_strict", + "source": "/proc/sys/kernel/sysctl_writes_strict", + "value": "1\n" + }, { + "key": "sysrq", + "source": "/proc/sys/kernel/sysrq", + "value": "176\n" + }, { + "key": "tainted", + "source": "/proc/sys/kernel/tainted", + "value": "12288\n" + }, { + "key": "threads-max", + "source": "/proc/sys/kernel/threads-max", + "value": "80984\n" + }, { + "key": "timer_migration", + "source": "/proc/sys/kernel/timer_migration", + "value": "1\n" + }, { + "key": "traceoff_on_warning", + "source": "/proc/sys/kernel/traceoff_on_warning", + "value": "0\n" + }, { + "key": "tracepoint_printk", + "source": "/proc/sys/kernel/tracepoint_printk", + "value": "0\n" + }, { + "key": "unknown_nmi_panic", + "source": "/proc/sys/kernel/unknown_nmi_panic", + "value": "0\n" + }, { + "key": "unprivileged_bpf_disabled", + "source": "/proc/sys/kernel/unprivileged_bpf_disabled", + "value": "2\n" + }, { + "key": "unprivileged_userns_apparmor_policy", + "source": "/proc/sys/kernel/unprivileged_userns_apparmor_policy", + "value": "1\n" + }, { + "key": "unprivileged_userns_clone", + "source": "/proc/sys/kernel/unprivileged_userns_clone", + "value": "1\n" + }, { + "key": "bset", + "source": "/proc/sys/kernel/usermodehelper/bset", + "value": "4294967295\t511\n" + }, { + "key": "inheritable", + "source": "/proc/sys/kernel/usermodehelper/inheritable", + "value": "4294967295\t511\n" + }, { + "key": "version", + "source": "/proc/sys/kernel/version", + "value": "#44~20.04.1-Ubuntu SMP Thu Mar 24 16:43:35 UTC 2022\n" + }, { + 
"key": "watchdog", + "source": "/proc/sys/kernel/watchdog", + "value": "1\n" + }, { + "key": "watchdog_cpumask", + "source": "/proc/sys/kernel/watchdog_cpumask", + "value": "0-3\n" + }, { + "key": "watchdog_thresh", + "source": "/proc/sys/kernel/watchdog_thresh", + "value": "10\n" + }, { + "key": "ptrace_scope", + "source": "/proc/sys/kernel/yama/ptrace_scope", + "value": "1\n" + }], + "kind": "LinuxKernelVariables", + "metadata": { + "name": "minikube" + } +} \ No newline at end of file diff --git a/rules/CVE-2022-0185/test/test/input/node.json b/rules/CVE-2022-0185/test/test_generic_fail/input/node.json similarity index 100% rename from rules/CVE-2022-0185/test/test/input/node.json rename to rules/CVE-2022-0185/test/test_generic_fail/input/node.json diff --git a/rules/CVE-2022-0185/test/test_generic_pass/expected.json b/rules/CVE-2022-0185/test/test_generic_pass/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/CVE-2022-0185/test/test_generic_pass/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/CVE-2022-0185/test/test_generic_pass/input/kernelvars.json b/rules/CVE-2022-0185/test/test_generic_pass/input/kernelvars.json new file mode 100644 index 000000000..bc5569e94 --- /dev/null +++ b/rules/CVE-2022-0185/test/test_generic_pass/input/kernelvars.json @@ -0,0 +1,536 @@ +{ + "apiVersion": "hostdata.kubescape.cloud/v1beta0", + "data": [{ + "key": "acct", + "source": "/proc/sys/kernel/acct", + "value": "4\t2\t30\n" + }, { + "key": "acpi_video_flags", + "source": "/proc/sys/kernel/acpi_video_flags", + "value": "0\n" + }, { + "key": "apparmor_display_secid_mode", + "source": "/proc/sys/kernel/apparmor_display_secid_mode", + "value": "0\n" + }, { + "key": "auto_msgmni", + "source": "/proc/sys/kernel/auto_msgmni", + "value": "0\n" + }, { + "key": "bootloader_type", + "source": "/proc/sys/kernel/bootloader_type", + "value": "114\n" + }, { + "key": "bootloader_version", + "source": "/proc/sys/kernel/bootloader_version", + "value": "2\n" + }, { + "key": "bpf_stats_enabled", + "source": "/proc/sys/kernel/bpf_stats_enabled", + "value": "0\n" + }, { + "key": "cad_pid", + "source": "/proc/sys/kernel/cad_pid", + "value": "0\n" + }, { + "key": "cap_last_cap", + "source": "/proc/sys/kernel/cap_last_cap", + "value": "40\n" + }, { + "key": "core_pattern", + "source": "/proc/sys/kernel/core_pattern", + "value": "|/usr/share/apport/apport %p %s %c %d %P %E\n" + }, { + "key": "core_pipe_limit", + "source": "/proc/sys/kernel/core_pipe_limit", + "value": "0\n" + }, { + "key": "core_uses_pid", + "source": "/proc/sys/kernel/core_uses_pid", + "value": "0\n" + }, { + "key": "ctrl-alt-del", + "source": "/proc/sys/kernel/ctrl-alt-del", + "value": "0\n" + }, { + "key": "dmesg_restrict", + "source": "/proc/sys/kernel/dmesg_restrict", + "value": "0\n" + }, { + "key": "domainname", + "source": "/proc/sys/kernel/domainname", + "value": "(none)\n" + }, { + "key": "force_sysfs_fallback", + "source": "/proc/sys/kernel/firmware_config/force_sysfs_fallback", + "value": "0\n" + }, { + "key": "ignore_sysfs_fallback", + "source": "/proc/sys/kernel/firmware_config/ignore_sysfs_fallback", + "value": "0\n" + }, { + "key": "ftrace_dump_on_oops", + "source": "/proc/sys/kernel/ftrace_dump_on_oops", + "value": "0\n" + }, { + "key": "ftrace_enabled", + "source": "/proc/sys/kernel/ftrace_enabled", + "value": "1\n" + }, { + "key": "hardlockup_all_cpu_backtrace", + "source": "/proc/sys/kernel/hardlockup_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "hardlockup_panic", 
+ "source": "/proc/sys/kernel/hardlockup_panic", + "value": "0\n" + }, { + "key": "hostname", + "source": "/proc/sys/kernel/hostname", + "value": "minikube\n" + }, { + "key": "hotplug", + "source": "/proc/sys/kernel/hotplug", + "value": "\n" + }, { + "key": "hung_task_all_cpu_backtrace", + "source": "/proc/sys/kernel/hung_task_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "hung_task_check_count", + "source": "/proc/sys/kernel/hung_task_check_count", + "value": "4194304\n" + }, { + "key": "hung_task_check_interval_secs", + "source": "/proc/sys/kernel/hung_task_check_interval_secs", + "value": "0\n" + }, { + "key": "hung_task_panic", + "source": "/proc/sys/kernel/hung_task_panic", + "value": "0\n" + }, { + "key": "hung_task_timeout_secs", + "source": "/proc/sys/kernel/hung_task_timeout_secs", + "value": "120\n" + }, { + "key": "hung_task_warnings", + "source": "/proc/sys/kernel/hung_task_warnings", + "value": "10\n" + }, { + "key": "io_delay_type", + "source": "/proc/sys/kernel/io_delay_type", + "value": "1\n" + }, { + "key": "kexec_load_disabled", + "source": "/proc/sys/kernel/kexec_load_disabled", + "value": "0\n" + }, { + "key": "gc_delay", + "source": "/proc/sys/kernel/keys/gc_delay", + "value": "300\n" + }, { + "key": "maxbytes", + "source": "/proc/sys/kernel/keys/maxbytes", + "value": "20000\n" + }, { + "key": "maxkeys", + "source": "/proc/sys/kernel/keys/maxkeys", + "value": "200\n" + }, { + "key": "persistent_keyring_expiry", + "source": "/proc/sys/kernel/keys/persistent_keyring_expiry", + "value": "259200\n" + }, { + "key": "root_maxbytes", + "source": "/proc/sys/kernel/keys/root_maxbytes", + "value": "25000000\n" + }, { + "key": "root_maxkeys", + "source": "/proc/sys/kernel/keys/root_maxkeys", + "value": "1000000\n" + }, { + "key": "kptr_restrict", + "source": "/proc/sys/kernel/kptr_restrict", + "value": "1\n" + }, { + "key": "max_lock_depth", + "source": "/proc/sys/kernel/max_lock_depth", + "value": "1024\n" + }, { + "key": "max_rcu_stall_to_panic", + "source": "/proc/sys/kernel/max_rcu_stall_to_panic", + "value": "0\n" + }, { + "key": "modprobe", + "source": "/proc/sys/kernel/modprobe", + "value": "/sbin/modprobe\n" + }, { + "key": "modules_disabled", + "source": "/proc/sys/kernel/modules_disabled", + "value": "0\n" + }, { + "key": "msg_next_id", + "source": "/proc/sys/kernel/msg_next_id", + "value": "-1\n" + }, { + "key": "msgmax", + "source": "/proc/sys/kernel/msgmax", + "value": "8192\n" + }, { + "key": "msgmnb", + "source": "/proc/sys/kernel/msgmnb", + "value": "16384\n" + }, { + "key": "msgmni", + "source": "/proc/sys/kernel/msgmni", + "value": "32000\n" + }, { + "key": "ngroups_max", + "source": "/proc/sys/kernel/ngroups_max", + "value": "65536\n" + }, { + "key": "nmi_watchdog", + "source": "/proc/sys/kernel/nmi_watchdog", + "value": "0\n" + }, { + "key": "ns_last_pid", + "source": "/proc/sys/kernel/ns_last_pid", + "value": "17618\n" + }, { + "key": "numa_balancing", + "source": "/proc/sys/kernel/numa_balancing", + "value": "0\n" + }, { + "key": "oops_all_cpu_backtrace", + "source": "/proc/sys/kernel/oops_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "osrelease", + "source": "/proc/sys/kernel/osrelease", + "value": "5.13.0-39-generic\n" + }, { + "key": "ostype", + "source": "/proc/sys/kernel/ostype", + "value": "Linux\n" + }, { + "key": "overflowgid", + "source": "/proc/sys/kernel/overflowgid", + "value": "65534\n" + }, { + "key": "overflowuid", + "source": "/proc/sys/kernel/overflowuid", + "value": "65534\n" + }, { + "key": "panic", + "source": 
"/proc/sys/kernel/panic", + "value": "10\n" + }, { + "key": "panic_on_io_nmi", + "source": "/proc/sys/kernel/panic_on_io_nmi", + "value": "0\n" + }, { + "key": "panic_on_oops", + "source": "/proc/sys/kernel/panic_on_oops", + "value": "1\n" + }, { + "key": "panic_on_rcu_stall", + "source": "/proc/sys/kernel/panic_on_rcu_stall", + "value": "0\n" + }, { + "key": "panic_on_unrecovered_nmi", + "source": "/proc/sys/kernel/panic_on_unrecovered_nmi", + "value": "0\n" + }, { + "key": "panic_on_warn", + "source": "/proc/sys/kernel/panic_on_warn", + "value": "0\n" + }, { + "key": "panic_print", + "source": "/proc/sys/kernel/panic_print", + "value": "0\n" + }, { + "key": "perf_cpu_time_max_percent", + "source": "/proc/sys/kernel/perf_cpu_time_max_percent", + "value": "25\n" + }, { + "key": "perf_event_max_contexts_per_stack", + "source": "/proc/sys/kernel/perf_event_max_contexts_per_stack", + "value": "8\n" + }, { + "key": "perf_event_max_sample_rate", + "source": "/proc/sys/kernel/perf_event_max_sample_rate", + "value": "100000\n" + }, { + "key": "perf_event_max_stack", + "source": "/proc/sys/kernel/perf_event_max_stack", + "value": "127\n" + }, { + "key": "perf_event_mlock_kb", + "source": "/proc/sys/kernel/perf_event_mlock_kb", + "value": "516\n" + }, { + "key": "perf_event_paranoid", + "source": "/proc/sys/kernel/perf_event_paranoid", + "value": "4\n" + }, { + "key": "pid_max", + "source": "/proc/sys/kernel/pid_max", + "value": "4194304\n" + }, { + "key": "poweroff_cmd", + "source": "/proc/sys/kernel/poweroff_cmd", + "value": "/sbin/poweroff\n" + }, { + "key": "print-fatal-signals", + "source": "/proc/sys/kernel/print-fatal-signals", + "value": "0\n" + }, { + "key": "printk", + "source": "/proc/sys/kernel/printk", + "value": "4\t4\t1\t7\n" + }, { + "key": "printk_delay", + "source": "/proc/sys/kernel/printk_delay", + "value": "0\n" + }, { + "key": "printk_devkmsg", + "source": "/proc/sys/kernel/printk_devkmsg", + "value": "on\n" + }, { + "key": "printk_ratelimit", + "source": "/proc/sys/kernel/printk_ratelimit", + "value": "5\n" + }, { + "key": "printk_ratelimit_burst", + "source": "/proc/sys/kernel/printk_ratelimit_burst", + "value": "10\n" + }, { + "key": "max", + "source": "/proc/sys/kernel/pty/max", + "value": "4096\n" + }, { + "key": "nr", + "source": "/proc/sys/kernel/pty/nr", + "value": "4\n" + }, { + "key": "reserve", + "source": "/proc/sys/kernel/pty/reserve", + "value": "1024\n" + }, { + "key": "boot_id", + "source": "/proc/sys/kernel/random/boot_id", + "value": "a025a04b-23a2-44b6-aa3a-2b3d3650bcbb\n" + }, { + "key": "entropy_avail", + "source": "/proc/sys/kernel/random/entropy_avail", + "value": "3806\n" + }, { + "key": "poolsize", + "source": "/proc/sys/kernel/random/poolsize", + "value": "4096\n" + }, { + "key": "urandom_min_reseed_secs", + "source": "/proc/sys/kernel/random/urandom_min_reseed_secs", + "value": "60\n" + }, { + "key": "uuid", + "source": "/proc/sys/kernel/random/uuid", + "value": "7b6b5bf9-9af4-49db-aba6-f0be1c57e2b8\n" + }, { + "key": "write_wakeup_threshold", + "source": "/proc/sys/kernel/random/write_wakeup_threshold", + "value": "896\n" + }, { + "key": "randomize_va_space", + "source": "/proc/sys/kernel/randomize_va_space", + "value": "2\n" + }, { + "key": "real-root-dev", + "source": "/proc/sys/kernel/real-root-dev", + "value": "0\n" + }, { + "key": "sched_autogroup_enabled", + "source": "/proc/sys/kernel/sched_autogroup_enabled", + "value": "1\n" + }, { + "key": "sched_cfs_bandwidth_slice_us", + "source": "/proc/sys/kernel/sched_cfs_bandwidth_slice_us", + 
"value": "5000\n" + }, { + "key": "sched_child_runs_first", + "source": "/proc/sys/kernel/sched_child_runs_first", + "value": "0\n" + }, { + "key": "sched_deadline_period_max_us", + "source": "/proc/sys/kernel/sched_deadline_period_max_us", + "value": "4194304\n" + }, { + "key": "sched_deadline_period_min_us", + "source": "/proc/sys/kernel/sched_deadline_period_min_us", + "value": "100\n" + }, { + "key": "sched_energy_aware", + "source": "/proc/sys/kernel/sched_energy_aware", + "value": "1\n" + }, { + "key": "sched_rr_timeslice_ms", + "source": "/proc/sys/kernel/sched_rr_timeslice_ms", + "value": "100\n" + }, { + "key": "sched_rt_period_us", + "source": "/proc/sys/kernel/sched_rt_period_us", + "value": "1000000\n" + }, { + "key": "sched_rt_runtime_us", + "source": "/proc/sys/kernel/sched_rt_runtime_us", + "value": "950000\n" + }, { + "key": "sched_schedstats", + "source": "/proc/sys/kernel/sched_schedstats", + "value": "0\n" + }, { + "key": "sched_util_clamp_max", + "source": "/proc/sys/kernel/sched_util_clamp_max", + "value": "1024\n" + }, { + "key": "sched_util_clamp_min", + "source": "/proc/sys/kernel/sched_util_clamp_min", + "value": "1024\n" + }, { + "key": "sched_util_clamp_min_rt_default", + "source": "/proc/sys/kernel/sched_util_clamp_min_rt_default", + "value": "1024\n" + }, { + "key": "actions_avail", + "source": "/proc/sys/kernel/seccomp/actions_avail", + "value": "kill_process kill_thread trap errno user_notif trace log allow\n" + }, { + "key": "actions_logged", + "source": "/proc/sys/kernel/seccomp/actions_logged", + "value": "kill_process kill_thread trap errno user_notif trace log\n" + }, { + "key": "sem", + "source": "/proc/sys/kernel/sem", + "value": "32000\t1024000000\t500\t32000\n" + }, { + "key": "sem_next_id", + "source": "/proc/sys/kernel/sem_next_id", + "value": "-1\n" + }, { + "key": "sg-big-buff", + "source": "/proc/sys/kernel/sg-big-buff", + "value": "32768\n" + }, { + "key": "shm_next_id", + "source": "/proc/sys/kernel/shm_next_id", + "value": "-1\n" + }, { + "key": "shm_rmid_forced", + "source": "/proc/sys/kernel/shm_rmid_forced", + "value": "0\n" + }, { + "key": "shmall", + "source": "/proc/sys/kernel/shmall", + "value": "18446744073692774399\n" + }, { + "key": "shmmax", + "source": "/proc/sys/kernel/shmmax", + "value": "18446744073692774399\n" + }, { + "key": "shmmni", + "source": "/proc/sys/kernel/shmmni", + "value": "4096\n" + }, { + "key": "soft_watchdog", + "source": "/proc/sys/kernel/soft_watchdog", + "value": "1\n" + }, { + "key": "softlockup_all_cpu_backtrace", + "source": "/proc/sys/kernel/softlockup_all_cpu_backtrace", + "value": "0\n" + }, { + "key": "softlockup_panic", + "source": "/proc/sys/kernel/softlockup_panic", + "value": "0\n" + }, { + "key": "stack_tracer_enabled", + "source": "/proc/sys/kernel/stack_tracer_enabled", + "value": "0\n" + }, { + "key": "sysctl_writes_strict", + "source": "/proc/sys/kernel/sysctl_writes_strict", + "value": "1\n" + }, { + "key": "sysrq", + "source": "/proc/sys/kernel/sysrq", + "value": "176\n" + }, { + "key": "tainted", + "source": "/proc/sys/kernel/tainted", + "value": "12288\n" + }, { + "key": "threads-max", + "source": "/proc/sys/kernel/threads-max", + "value": "80984\n" + }, { + "key": "timer_migration", + "source": "/proc/sys/kernel/timer_migration", + "value": "1\n" + }, { + "key": "traceoff_on_warning", + "source": "/proc/sys/kernel/traceoff_on_warning", + "value": "0\n" + }, { + "key": "tracepoint_printk", + "source": "/proc/sys/kernel/tracepoint_printk", + "value": "0\n" + }, { + "key": 
"unknown_nmi_panic", + "source": "/proc/sys/kernel/unknown_nmi_panic", + "value": "0\n" + }, { + "key": "unprivileged_bpf_disabled", + "source": "/proc/sys/kernel/unprivileged_bpf_disabled", + "value": "2\n" + }, { + "key": "unprivileged_userns_apparmor_policy", + "source": "/proc/sys/kernel/unprivileged_userns_apparmor_policy", + "value": "1\n" + }, { + "key": "unprivileged_userns_clone", + "source": "/proc/sys/kernel/unprivileged_userns_clone", + "value": "0\n" + }, { + "key": "bset", + "source": "/proc/sys/kernel/usermodehelper/bset", + "value": "4294967295\t511\n" + }, { + "key": "inheritable", + "source": "/proc/sys/kernel/usermodehelper/inheritable", + "value": "4294967295\t511\n" + }, { + "key": "version", + "source": "/proc/sys/kernel/version", + "value": "#44~20.04.1-Ubuntu SMP Thu Mar 24 16:43:35 UTC 2022\n" + }, { + "key": "watchdog", + "source": "/proc/sys/kernel/watchdog", + "value": "1\n" + }, { + "key": "watchdog_cpumask", + "source": "/proc/sys/kernel/watchdog_cpumask", + "value": "0-3\n" + }, { + "key": "watchdog_thresh", + "source": "/proc/sys/kernel/watchdog_thresh", + "value": "10\n" + }, { + "key": "ptrace_scope", + "source": "/proc/sys/kernel/yama/ptrace_scope", + "value": "1\n" + }], + "kind": "LinuxKernelVariables", + "metadata": { + "name": "minikube" + } +} \ No newline at end of file diff --git a/rules/CVE-2022-0185/test/test_generic_pass/input/node.json b/rules/CVE-2022-0185/test/test_generic_pass/input/node.json new file mode 100644 index 000000000..b483df64f --- /dev/null +++ b/rules/CVE-2022-0185/test/test_generic_pass/input/node.json @@ -0,0 +1,264 @@ +{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-04-26T05:54:17Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube", + "kubernetes.io/os": "linux", + "minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d", + "minikube.k8s.io/name": "minikube", + "minikube.k8s.io/updated_at": "2022_04_26T08_54_20_0700", + "minikube.k8s.io/version": "v1.25.1", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "managedFields": [{ + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:annotations": { + ".": {}, + "f:kubeadm.alpha.kubernetes.io/cri-socket": {}, + "f:volumes.kubernetes.io/controller-managed-attach-detach": {} + }, + "f:labels": { + ".": {}, + "f:beta.kubernetes.io/arch": {}, + "f:beta.kubernetes.io/os": {}, + "f:kubernetes.io/arch": {}, + "f:kubernetes.io/hostname": {}, + "f:kubernetes.io/os": {}, + "f:node-role.kubernetes.io/control-plane": {}, + "f:node-role.kubernetes.io/master": {}, + "f:node.kubernetes.io/exclude-from-external-load-balancers": {} + } + } + }, + "manager": "Go-http-client", + "operation": "Update", + "time": "2022-04-26T05:54:20Z" + }, { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:labels": { + "f:minikube.k8s.io/commit": {}, + "f:minikube.k8s.io/name": {}, + "f:minikube.k8s.io/updated_at": {}, + "f:minikube.k8s.io/version": {} + } + } + }, + "manager": "kubectl-label", + "operation": "Update", + "time": "2022-04-26T05:54:21Z" + }, { + "apiVersion": "v1", 
+ "fieldsType": "FieldsV1", + "fieldsV1": { + "f:status": { + "f:conditions": { + "k:{\"type\":\"DiskPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"MemoryPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"PIDPressure\"}": { + "f:lastHeartbeatTime": {} + }, + "k:{\"type\":\"Ready\"}": { + "f:lastHeartbeatTime": {}, + "f:lastTransitionTime": {}, + "f:message": {}, + "f:reason": {}, + "f:status": {} + } + } + } + }, + "manager": "Go-http-client", + "operation": "Update", + "subresource": "status", + "time": "2022-04-26T05:54:31Z" + }, { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:annotations": { + "f:node.alpha.kubernetes.io/ttl": {} + } + }, + "f:spec": { + "f:podCIDR": {}, + "f:podCIDRs": { + ".": {}, + "v:\"10.244.0.0/24\"": {} + } + } + }, + "manager": "kube-controller-manager", + "operation": "Update", + "time": "2022-04-26T05:54:33Z" + }], + "name": "minikube", + "resourceVersion": "4245", + "uid": "5a3a25d4-b1e5-42d3-a533-4d36f084314e" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": ["10.244.0.0/24"] + }, + "status": { + "addresses": [{ + "address": "192.168.49.2", + "type": "InternalIP" + }, { + "address": "minikube", + "type": "Hostname" + }], + "allocatable": { + "cpu": "4", + "ephemeral-storage": "94850516Ki", + "hugepages-2Mi": "0", + "memory": "10432976Ki", + "pods": "110" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "94850516Ki", + "hugepages-2Mi": "0", + "memory": "10432976Ki", + "pods": "110" + }, + "conditions": [{ + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:14Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, { + "lastHeartbeatTime": "2022-04-26T07:21:25Z", + "lastTransitionTime": "2022-04-26T05:54:31Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + }], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [{ + "names": ["quay.io/armosec/k8s-ca-vuln-scan-ubi@sha256:275fa8a7a1e58cbd3c94bbf6c6a423970d6b44c5355021f2a7ca937563c26593", "quay.io/armosec/k8s-ca-vuln-scan-ubi:127"], + "sizeBytes": 1018599142 + }, { + "names": ["gcr.io/google-samples/node-hello@sha256:d238d0ab54efb76ec0f7b1da666cefa9b40be59ef34346a761b8adc2dd45459b", "gcr.io/google-samples/node-hello:1.0"], + "sizeBytes": 643762709 + }, { + "names": ["requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"], + "sizeBytes": 441083858 + }, { + "names": ["mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"], + "sizeBytes": 400782682 + }, { + "names": ["k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263", "k8s.gcr.io/etcd:3.5.1-0"], + "sizeBytes": 292558922 + }, { + "names": ["kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e", "kubernetesui/dashboard:v2.3.1"], + 
"sizeBytes": 220033604 + }, { + "names": ["httpd@sha256:94cd479f4875e3e0fba620baf7a0e9353e15783368f4f74b9ea5bdc729b3f366", "httpd:2.4"], + "sizeBytes": 143610390 + }, { + "names": ["quay.io/armosec/k8s-ca-dashboard-aggregator-ubi@sha256:5dd4c701070c0168dda6bf4932f2752212a6b8f9d70c0fa15f10f29d82ed460a", "quay.io/armosec/k8s-ca-dashboard-aggregator-ubi:185"], + "sizeBytes": 138395979 + }, { + "names": ["k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255", "k8s.gcr.io/kube-apiserver:v1.23.1"], + "sizeBytes": 135162256 + }, { + "names": ["k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604", "k8s.gcr.io/kube-controller-manager:v1.23.1"], + "sizeBytes": 124971684 + }, { + "names": ["k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8", "k8s.gcr.io/kube-proxy:v1.23.1"], + "sizeBytes": 112327826 + }, { + "names": ["quay.io/armosec/notification-server-ubi@sha256:4fc284ba63683e00468b92db20f51c1209ae475a6d0bd53c1b025964876d0eea", "quay.io/armosec/notification-server-ubi:89"], + "sizeBytes": 109413165 + }, { + "names": ["nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"], + "sizeBytes": 109129446 + }, { + "names": ["quay.io/armosec/kubescape@sha256:b76503638466be6a9b988890202fa00de0e8806819a4a4438328e50abdac270c", "quay.io/armosec/kubescape:v2.0.149"], + "sizeBytes": 55122796 + }, { + "names": ["k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c", "k8s.gcr.io/kube-scheduler:v1.23.1"], + "sizeBytes": 53488305 + }, { + "names": ["k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e", "k8s.gcr.io/coredns/coredns:v1.8.6"], + "sizeBytes": 46829283 + }, { + "names": ["quay.io/armosec/k8s-ca-websocket-ubi@sha256:a5eba54aeada7d995f83356dcabb6c505e3922016d29246fa0e8a3c179533861", "quay.io/armosec/k8s-ca-websocket-ubi:458"], + "sizeBytes": 45050289 + }, { + "names": ["kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172", "kubernetesui/metrics-scraper:v1.0.7"], + "sizeBytes": 34446077 + }, { + "names": ["gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", "gcr.io/k8s-minikube/storage-provisioner:v5"], + "sizeBytes": 31465472 + }, { + "names": ["quay.io/armosec/kube-host-sensor@sha256:b592a099c72c5f7ccc9da011b9c9f3297e7a60f5910a20f994c9dfa6142d9204"], + "sizeBytes": 11807596 + }, { + "names": ["quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee", "quay.io/armosec/kube-host-sensor:latest"], + "sizeBytes": 11796788 + }, { + "names": ["busybox@sha256:caa382c432891547782ce7140fb3b7304613d3b0438834dce1cad68896ab110a", "busybox:latest"], + "sizeBytes": 1239748 + }, { + "names": ["k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db", "k8s.gcr.io/pause:3.6"], + "sizeBytes": 682696 + }], + "nodeInfo": { + "architecture": "amd64", + "bootID": "a025a04b-23a2-44b6-aa3a-2b3d3650bcbb", + "containerRuntimeVersion": "docker://20.10.12", + "kernelVersion": "5.13.0-39-generic", + "kubeProxyVersion": "v1.23.1", + "kubeletVersion": "v1.23.1", + "machineID": "8de776e053e140d6a14c2d2def3d6bb8", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.2 LTS", + "systemUUID": "8d013ac0-0dbc-4c34-b2bd-0365fd0fd31c" + } + } +} \ No newline at end of file From 
534f4f6b81dcc2785b1dea963b43b3b4b6f1ed86 Mon Sep 17 00:00:00 2001 From: Rohit Date: Tue, 21 Nov 2023 19:09:29 +0530 Subject: [PATCH 087/195] feat: controls docs publishing pipeline Signed-off-by: Rohit --- .github/sync.yml | 3 + .github/workflows/create-release.yaml | 4 +- .github/workflows/sync.yml | 24 ++ scripts/mk-generator.py | 417 ++++++++++++++++++++++++++ scripts/upload-readme.py | 161 ++++++++-- 5 files changed, 582 insertions(+), 27 deletions(-) create mode 100644 .github/sync.yml create mode 100644 .github/workflows/sync.yml create mode 100644 scripts/mk-generator.py diff --git a/.github/sync.yml b/.github/sync.yml new file mode 100644 index 000000000..d6a54b3dd --- /dev/null +++ b/.github/sync.yml @@ -0,0 +1,3 @@ +kubescape/kubescape.io: + - source: docs/controls + dest: docs/docs/controls/ \ No newline at end of file diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml index 1cba49c17..1031b4b13 100644 --- a/.github/workflows/create-release.yaml +++ b/.github/workflows/create-release.yaml @@ -165,4 +165,6 @@ jobs: env: README_API_KEY: ${{ secrets.README_API_KEY }} run: |- - python ./scripts/upload-readme.py \ No newline at end of file + python ./scripts/upload-readme.py + - name: execute docs generator script + run: python ./scripts/mk-generator.py \ No newline at end of file diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml new file mode 100644 index 000000000..d73907ad5 --- /dev/null +++ b/.github/workflows/sync.yml @@ -0,0 +1,24 @@ +name: Sync Files + +on: + push: + branches: + - master + paths: + - 'docs/controls/**' + workflow_dispatch: + +jobs: + sync: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@master + - name: Run GitHub File Sync + uses: BetaHuhn/repo-file-sync-action@v1 + with: + GH_PAT: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + COMMIT_BODY: "Sync control library docs from `regolibrary` repo" + PR_BODY: Syncing the Control Library docs from `regolibrary` repository to update the `controls` documentation + PR_LABELS: automerge + COMMIT_PREFIX: "" \ No newline at end of file diff --git a/scripts/mk-generator.py b/scripts/mk-generator.py new file mode 100644 index 000000000..264599bfb --- /dev/null +++ b/scripts/mk-generator.py @@ -0,0 +1,417 @@ +""" +This script is used to generate a markdown file for each control in the `controls` folder. +The generated markdown files are placed into the `docs/controls` directory. +Each markdown file contains detailed information about a control, +such as its severity, description, related resources, test, remediation, and example. +""" + +import os +import json + +def ignore_framework(framework_name: str): + """ + determines whether or not to ignore a framework based on its name. + + Parameters + ---------- + framework_name: the name of the framework + + Returns + -------- + True if the framework should be ignored, False otherwise + + """ + return framework_name == 'YAML-scanning' or framework_name.startswith('developer') + +def get_frameworks_for_control(control): + """ + returns the frameworks a given control conforms to. 
+ + Parameters + ---------- + control: the control object + + Returns + ------- + a list of framework names + + """ + r = [] + # Loop through all the json files in the 'frameworks' directory + for frameworks_json_file_name in filter(lambda fn: fn.endswith('.json'),os.listdir('frameworks')): + framework = json.load(open(os.path.join('frameworks',frameworks_json_file_name))) + if ignore_framework(framework['name']): + continue + + # Under the active controls the framework has, check if the given control is one of them + if "activeControls" in framework: + for activeControl in framework["activeControls"]: + if control['controlID'].lower() == activeControl["controlID"].lower(): + r.append(framework['name']) + return r + +def create_md_for_control(control): + """ + generates a markdown file for a given control. + + Parameters + ---------- + control: the control object + + Returns + ------- + the markdown text/file + + """ + related_resources = set() + control_config_input = {} + host_sensor = False + cloud_control = False + + # Loop through all the rules of the control + for rule_obj in control['rules']: + # If the rule has a 'match' field, add its resources to the related resources + if 'match' in rule_obj: + for match_obj in rule_obj['match']: + if 'resources' in match_obj: + related_resources.update(set(match_obj['resources'])) + # If the rule has a 'controlConfigInputs' field, add its configuration to the control configuration input + if 'controlConfigInputs' in rule_obj: + for control_config in rule_obj['controlConfigInputs']: + control_config_input[control_config['path']] = control_config + # If the rule has a 'attributes' field and it contains 'hostSensorRule', set host_sensor to True + if 'attributes' in rule_obj: + if 'hostSensorRule' in rule_obj['attributes']: + host_sensor = True + # If the rule has a 'relevantCloudProviders' field and it is not empty, set cloud_control to True + if 'relevantCloudProviders' in rule_obj: + cloud_control = len(rule_obj['relevantCloudProviders']) > 0 + + # Start creating the markdown text + md_text = '' + md_text += '# %s - %s\n' % (control['controlID'], control['name']) + '\n' + + if host_sensor: + md_text += '## Prerequisites\n *Run Kubescape with host sensor (see [here](https://hub.armo.cloud/docs/host-sensor))*\n \n' + if cloud_control: + md_text += '## Prerequisites\n *Integrate with cloud provider (see [here](https://hub.armosec.io/docs/kubescape-integration-with-cloud-providers))*\n \n' + frameworks = get_frameworks_for_control(control) + md_text += '## Framework%s\n' % ('s' if len(frameworks) > 1 else '') + md_text += '\n'.join(['* ' + framework for framework in frameworks]) + '\n \n' + md_text += '## Severity\n' + # severity map: https://github.com/kubescape/opa-utils/blob/master/reporthandling/apis/severity.go#L34 + severity_map = {1:'Low',2:'Low',3:'Low',4:'Medium',5:'Medium',6:'Medium',7:'High',8:'High',9:'Critical',10:'Critical'} + md_text += '%s\n' % severity_map[int(control['baseScore'])] + '\n' + if 'long_description' in control or 'description' in control: + description = control['long_description'] if 'long_description' in control else control['description'] + if description.strip(): + md_text += '## Description of the issue\n' + if len(control_config_input): + description += ' Note, [this control is configurable](#configuration-parameters).' 
+ md_text += description + '\n \n' + if related_resources: + md_text += '## Related resources\n' + md_text += ', '.join(sorted(list(related_resources))) + '\n \n' + + md_text += '## What this control tests \n' + test = control['test'] if 'test' in control else control['description'] + md_text += test + '\n \n' + + if 'manual_test' in control and control['manual_test'].strip(): + md_text += '## How to check it manually \n' + manual_test = control['manual_test'] + md_text += manual_test + '\n \n' + + if 'remediation' in control and control['remediation'].strip(): + md_text += '## Remediation\n' + md_text += control['remediation'] + '\n \n' + if 'impact_statement' in control and control['impact_statement'].strip() and control['impact_statement'] != 'None': + md_text += '### Impact Statement\n' + control['impact_statement'] + '\n \n' + if 'default_value' in control and control['default_value'].strip(): + md_text += '### Default Value\n' + control['default_value'] + '\n \n' + + if len(control_config_input): + configuration_text = '## Configuration parameters \n You can adjust the configuration of this control to suit your specific environment. [Read the documentation on configuring controls](../frameworks-and-controls/configuring-controls.md) to learn more.\n \n' + for control_config_name in control_config_input: + control_config = control_config_input[control_config_name] + # configuration_text += '### ' + control_config['name'] + '\n' + config_name = control_config['path'].split('.')[-1] + configuration_text += '* ' '[' + config_name + '](../frameworks-and-controls/configuring-controls.md#%s)'%config_name.lower() + ':' + '\n' + configuration_text += control_config['description'] + '\n \n' + md_text += configuration_text + + if 'example' in control and control['example'].strip(): + md_text += '## Example\n' + md_text += '```\n' + control['example'] + '\n```' + '\n' + return md_text + +def generate_index_md(controls): + """ + Generates the content for the index.md file based on the provided list of controls. + + Parameters + ---------- + controls: A list of control objects. + + Returns + ------- + str: The generated content for the index.md file. + + """ + # Sort the controls list based on control ID + controls.sort(key=lambda control: convert_control_id_to_doc_order(control['controlID'])) + + index_md = "# Control library\n\nEach control in the Kubescape control library is documented under this page.\n\n" + index_md += "| Control | Name | Framework |\n" + index_md += "| --- | --- | --- |\n" + + for control in controls: + control_id = control['controlID'] + control_name = control['name'] + control_frameworks = get_frameworks_for_control(control) + control_link = control_id.lower().replace(".", "-") + ".md" + index_md += "| [%s](%s) | %s | %s |\n" % (control_id, control_link, control_name, ", ".join(control_frameworks)) + + return index_md + +def generate_slug(control): + """ + Generates a slug for a given control. + + Parameters + ---------- + control: The control object. + + Returns + ------- + str: The generated slug for the control. + + """ + return control['controlID'].lower().replace(".", "-") + +def get_configuration_parameters_info(): + """ + Fetches and obtains the control's configuration parameters information. + + Returns + ------- + tuple: A tuple containing two dictionaries - config_parameters and default_config_inputs. + - config_parameters: A dictionary mapping configuration parameter names to their corresponding configuration objects. 
+ - default_config_inputs: A dictionary containing default configuration inputs. + """ + default_config_inputs = None + with open('default-config-inputs.json','r') as f: + default_config_inputs = json.load(f)['settings']['postureControlInputs'] + + config_parameters = {} + for control_json_file_name in filter(lambda fn: fn.endswith('.json'),os.listdir('controls')): + try: + control_obj = json.load(open(os.path.join('controls',control_json_file_name))) + control_obj['rules'] = [] + for rule_directory_name in os.listdir('rules'): + rule_metadata_file_name = os.path.join('rules',rule_directory_name,'rule.metadata.json') + if os.path.isfile(rule_metadata_file_name): + rule_obj = json.load(open(rule_metadata_file_name)) + if rule_obj['name'] in control_obj['rulesNames']: + control_obj['rules'].append(rule_obj) + if 'controlConfigInputs' in rule_obj: + for config in rule_obj['controlConfigInputs']: + name = config['path'].split('.')[-1] + config_parameters[name] = config + except Exception as e: + print('error processing %s: %s'%(control_json_file_name,e)) + + return config_parameters, default_config_inputs + +# Function to convert a control id to a doc order +def convert_control_id_to_doc_order(control_id: str) -> int: + """get a control_id and returns it's expected order in docs. + control_id is expected to either have "c-" or "cis-" prefix, otherwise raises an error. + + Parameters + ---------- + control_id : str + A string of structure "c-xxx" or "cis-x.y.z" + + Returns + --------- + int + + """ + control_id = control_id.lower() + + + if "c-" in control_id: + return int(control_id.replace("c-", "")) + + if "cis-" in control_id: + return convert_dotted_section_to_int(control_id.replace("cis-", "")) + + raise Exception(f"control_id structure unknown {control_id}") + +# Function to convert a dotted section to an int +def convert_dotted_section_to_int(subsection_id : str, + subsection_digits : int = 2, + n_subsections : int = 3) -> int: + """returns int representation of a dotted separated subsection string. + + Parameters + ---------- + subsection_id : str + A dotted subsection string - examples: 1.2, 2.3.12 + + subsection_digits : int, optional + The number of digits each subsection should have (default is 2) + + n_subsections : int, optional + The number of expected subsections (default is 3) + + Returns + --------- + int + + Examples (with default values): + --------- + convert_dotted_section_to_int("1.1.12", 2, 3) = 01.01.12 = 10112 + convert_dotted_section_to_int("1.1.1", 2, 3)= 01.01.01 = 10101 + convert_dotted_section_to_int("1.2.1", 2, 3) = 01.02.01 = 10201 + + convert_dotted_section_to_int("1.2", 3, 3) = 001.002.000 = 1002000 + + """ + + if subsection_id == "": + raise Exception("subsection_id string is empty") + + subsection_ids = subsection_id.split(".") + + res = "" + + # iterate each subsection + for subsection in subsection_ids: + current_subsection_id = subsection + + # identify the the subsection range and add "0"s to prefix if needed. 
+ for i in range(1, subsection_digits): + if int(subsection) < 10**i: + current_subsection_id = "0"*(subsection_digits-i) + current_subsection_id + break + + res = res + current_subsection_id + + # if there are missing subsections, add "0"s to the right of the int + if n_subsections > len(subsection_ids): + res = res + "0"*subsection_digits*(n_subsections - len(subsection_ids)) + + return int(res) + +# Function to find inactive controls in docs +def find_inactive_controls_in_docs(list_docs : list, list_active: list) -> list: + """returns a list of controls that doesn't exist in rego but exit in docs. + + Parameters + ---------- + list_docs : list + a list of slugs in docs + + list_active: list + a list of active controls from rego + + + Returns + --------- + list - item that exist in list_docs but doesn't exist in list_active + + """ + return list(sorted(set(list_docs)- set(list_active))) + +def main(): + # Define the directory where the Markdown files should be created. + docs_dir = 'docs/controls' + + # Ensure the directory exists, if not create it + if not os.path.exists(docs_dir): + os.makedirs(docs_dir) + + # Fetches the Configuration parameters and related resources per control + config_parameters, default_config_inputs = get_configuration_parameters_info() + + # Processing and obtaining the parameters for each control + i = 0 + for config_parameters_path in sorted(list(config_parameters.keys())): + print('Processing ',config_parameters_path) + # Create md + md = '# %s\n' % config_parameters_path + md += '## Description\n' + md += config_parameters[config_parameters_path]['description'] + '\n' + md += '## Default values\n' + for dvalue in default_config_inputs[config_parameters_path]: + md += '* %s\n' % dvalue + + title = 'Parameter: %s' % config_parameters_path + config_parameter_slug = 'configuration_parameter_' + config_parameters_path.lower() + i = i + 1 + + controls = [] + # Process controls. + for control_json_file_name in filter(lambda fn: fn.endswith('.json'), os.listdir('controls')): + print('processing %s' % control_json_file_name) + control_obj = json.load(open(os.path.join('controls', control_json_file_name))) + + base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + if 'controlID' in control_obj: + controlID = control_obj['controlID'] + example_file_name = controlID.replace('C-00','c0') + '.yaml' + example_file_name = os.path.join('controls','examples',example_file_name) + if os.path.isfile(example_file_name): + with open(example_file_name) as f: + control_obj['example'] = f.read() + + if 'example' in control_obj and len(control_obj['example']) > 0 and control_obj['example'][0] == '@': + example_file_name = os.path.join(base_dir,control_obj['example'][1:]) + if os.path.isfile(example_file_name): + with open(example_file_name) as f: + control_obj['example'] = f.read() + else: + print('warning: %s is not a file' % example_file_name) + + control_obj['rules'] = [] + for rule_directory_name in os.listdir('rules'): + rule_metadata_file_name = os.path.join('rules',rule_directory_name,'rule.metadata.json') + if os.path.isfile(rule_metadata_file_name): + rule_obj = json.load(open(rule_metadata_file_name)) + if rule_obj['name'] in control_obj['rulesNames']: + control_obj['rules'].append(rule_obj) + + controls.append(control_obj) + + # Generate a Markdown document for the control. + md = create_md_for_control(control_obj) + + # Generate a slug for the control. + slug = generate_slug(control_obj) + + # Define the path of the Markdown file. 
+ md_file_path = os.path.join(docs_dir, slug + '.md') + + # Write the Markdown document to the file. + with open(md_file_path, 'w') as md_file: + md_file.write(md) + + print('created or updated %s' % md_file_path) + + # Generate the index.md file + index_md = generate_index_md(controls) + + # Define the path of the index.md file. + index_md_file_path = os.path.join(docs_dir, "index.md") + + # Write the index.md file + with open(index_md_file_path, 'w') as index_md_file: + index_md_file.write(index_md) + + print('created or updated %s' % index_md_file_path) + +# Run the main function if the script is run as a standalone program +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/scripts/upload-readme.py b/scripts/upload-readme.py index d351f224e..3a8560845 100644 --- a/scripts/upload-readme.py +++ b/scripts/upload-readme.py @@ -1,14 +1,37 @@ +""" +This script is used to manage the documentation of controls present in the `controls` folder in a Readme API. +It fetches the controls from the `controls` directory, +processes them, and then creates or updates the corresponding documentation in the Readme API. +It also handles the deletion of inactive controls from the documentation. + +The script uses the Readme API's categories, docs, and versions endpoints. + +The script follows these main steps: +1. Authenticate with the Readme API using an API key. +2. Validate the structure of the Readme documentation. +3. Process configuration parameters and update or create corresponding documentation. +4. Process each control, create its documentation, and update or create it in the Readme API. +5. Delete documentation for inactive controls. +""" + import requests import os import json import re class ReadmeApi(object): + """ + The script uses the ReadmeApi class to interact with the Readme API. This class has methods to authenticate, get categories, + get docs in a category, get a specific doc, delete a doc, create a doc, and update a doc. + """ def __init__(self): super().__init__() self.doc_version = None def authenticate(self, api_key): + """ + Function to authenticate with the Readme API + """ r = requests.get('https://dash.readme.com/api/v1', auth=(api_key, '')) if r.status_code != 200: raise Exception('Failed to authenticate') @@ -18,9 +41,15 @@ def authenticate(self, api_key): self.api_key = api_key def set_version(self, version:str): + """ + Function to set the version of the documentation + """ self.doc_version = version def get_categories(self): + """ + Function to fetch and obtain all categories from the Readme API. + """ url = "https://dash.readme.com/api/v1/categories" querystring = {"perPage":"1000","page":"1"} @@ -33,6 +62,9 @@ def get_categories(self): return r.json() def get_category(self,category_slug : str): + """ + Function to fetch and obtain a specific category from the Readme API using its slug. + """ url = "https://dash.readme.com/api/v1/categories/%s" % category_slug r = requests.request("GET", url,headers={"Accept": "application/json"}, auth=(self.api_key, '')) @@ -43,6 +75,9 @@ def get_category(self,category_slug : str): return r.json() def get_docs_in_category(self, category_slug: str): + """ + Function to fetch and obatin all the docs related to or of a specific category from the Readme API using the category's slug. 
+ """ url = "https://dash.readme.com/api/v1/categories/%s/docs" % category_slug r = requests.request("GET", url, headers={"Accept":"application/json"}, auth=(self.api_key, '')) @@ -53,6 +88,9 @@ def get_docs_in_category(self, category_slug: str): return r.json() def get_doc(self, doc_slug: str): + """ + Function to get a specific document from the Readme API using its slug. + """ url = "https://dash.readme.com/api/v1/docs/%s" % doc_slug r = requests.request("GET", url, headers={"Accept":"application/json"}, auth=(self.api_key, '')) @@ -65,6 +103,9 @@ def get_doc(self, doc_slug: str): return r.json() def delete_doc(self, doc_slug: str): + """ + Function to delete a specific doc from the Readme API using its slug. + """ url = "https://dash.readme.com/api/v1/docs/%s" % doc_slug r = requests.request("DELETE", url, headers={"Accept":"application/json"}, auth=(self.api_key, '')) @@ -73,6 +114,9 @@ def delete_doc(self, doc_slug: str): raise Exception('Failed to delete doc (%d)'%r.status_code) def create_doc(self, slug: str, parent_id: str, order: any, title: str, body: str, category: str): + """ + Function to create a new document in the Readme API. + """ url = "https://dash.readme.com/api/v1/docs" payload = { @@ -97,9 +141,11 @@ def create_doc(self, slug: str, parent_id: str, order: any, title: str, body: st raise Exception('Failed to create doc: %s'%r.text) return r.json() - + def update_doc(self, doc_slug: str, order: any, title: str, body: str, category: str): - + """ + Function to update a specific document in the Readme API using its slug. + """ url = "https://dash.readme.com/api/v1/docs/%s" % doc_slug payload = { @@ -121,9 +167,12 @@ def update_doc(self, doc_slug: str, order: any, title: str, body: str, category: return r.json() -# function is validating if the structure is validated and return an error if missing some objects. -# NOTE: objects might be changed from time to time, need to update accordingly + def validate_readme_structure(readmeapi : ReadmeApi): + """ + function is validating if the structure is validated and return an error if missing some objects. + NOTE: objects might be changed from time to time, need to update accordingly + """ categories = readmeapi.get_categories() filtered_categories = list(filter(lambda c: c['title'] == 'Review Controls',categories)) print(categories) @@ -138,6 +187,9 @@ def validate_readme_structure(readmeapi : ReadmeApi): raise Exception('Readme structure validation failure: missing "Controls" document') def get_document_for_control(readmeapi : ReadmeApi, control): + """ + Function to get the documentation for a specific control. It checks that there is exactly one "Controls" category and one document that starts with the control's id. + """ categories = readmeapi.get_categories() filtered_categories = list(filter(lambda c: c['title'] == 'Review Controls',categories)) if len(filtered_categories) != 1: @@ -151,9 +203,33 @@ def get_document_for_control(readmeapi : ReadmeApi, control): return control_doc def ignore_framework(framework_name: str): + """ + determines whether or not to ignore a framework based on its name. + + Parameters + ---------- + framework_name: the name of the framework + + Returns + -------- + True if the framework should be ignored, False otherwise + + """ return framework_name == 'YAML-scanning' or framework_name.startswith('developer') def get_frameworks_for_control(control): + """ + returns the frameworks a given control conforms to. 
+ + Parameters + ---------- + control: the control object + + Returns + ------- + a list of framework names + + """ r = [] for frameworks_json_file_name in filter(lambda fn: fn.endswith('.json'),os.listdir('frameworks')): framework = json.load(open(os.path.join('frameworks',frameworks_json_file_name))) @@ -166,48 +242,66 @@ def get_frameworks_for_control(control): r.append(framework['name']) return r - def create_md_for_control(control): + """ + generates a markdown file for a given control. + + Parameters + ---------- + control: the control object + + Returns + ------- + the markdown text/file + + """ related_resources = set() control_config_input = {} host_sensor = False cloud_control = False + + # Loop through all the rules of the control for rule_obj in control['rules']: + # If the rule has a 'match' field, add its resources to the related resources if 'match' in rule_obj: for match_obj in rule_obj['match']: if 'resources' in match_obj: related_resources.update(set(match_obj['resources'])) + # If the rule has a 'controlConfigInputs' field, add its configuration to the control configuration input if 'controlConfigInputs' in rule_obj: for control_config in rule_obj['controlConfigInputs']: control_config_input[control_config['path']] = control_config + # If the rule has a 'attributes' field and it contains 'hostSensorRule', set host_sensor to True if 'attributes' in rule_obj: if 'hostSensorRule' in rule_obj['attributes']: host_sensor = True + # If the rule has a 'relevantCloudProviders' field and it is not empty, set cloud_control to True if 'relevantCloudProviders' in rule_obj: cloud_control = len(rule_obj['relevantCloudProviders']) > 0 + # Start creating the markdown text md_text = '' if host_sensor: - md_text += '## Prerequisites\n*Run Kubescape with host sensor (see [here](https://hub.armo.cloud/docs/host-sensor))*\n' + md_text += '## Prerequisites\n *Run Kubescape with host sensor (see [here](https://hub.armo.cloud/docs/host-sensor))*\n \n' if cloud_control: - md_text += '## Prerequisites\n*Integrate with cloud provider (see [here](https://hub.armosec.io/docs/kubescape-integration-with-cloud-providers))*\n' + md_text += '## Prerequisites\n *Integrate with cloud provider (see [here](https://hub.armosec.io/docs/kubescape-integration-with-cloud-providers))*\n \n' md_text += '## Framework\n' - md_text += ', '.join(get_frameworks_for_control(control)) + '\n' + md_text += ', '.join(get_frameworks_for_control(control)) + '\n \n' md_text += '## Severity\n' # severity map: https://github.com/kubescape/opa-utils/blob/master/reporthandling/apis/severity.go#L34 severity_map = {1:'Low',2:'Low',3:'Low',4:'Medium',5:'Medium',6:'Medium',7:'High',8:'High',9:'Critical',10:'Critical'} - md_text += '%s\n' % severity_map[int(control['baseScore'])] + md_text += '%s\n' % severity_map[int(control['baseScore'])] + '\n' md_text += '## Description of the the issue\n' description = control['long_description'] if 'long_description' in control else control['description'] if len(control_config_input): description += 'Note, this control is configurable. See below the details.' 
- md_text += description + '\n' + md_text += description + '\n \n' md_text += '## Related resources\n' - md_text += ', '.join(sorted(list(related_resources))) + '\n' + md_text += ', '.join(sorted(list(related_resources))) + '\n \n' md_text += '## What does this control test\n' test = control['test'] if 'test' in control else control['description'] - md_text += test + '\n' + md_text += test + '\n \n' if 'manual_test' in control: md_text += '## How to check it manually\n' @@ -215,33 +309,54 @@ def create_md_for_control(control): md_text += manual_test + '\n' md_text += '## Remediation\n' - md_text += control['remediation'] + '\n' + md_text += control['remediation'] + '\n \n' if 'impact_statement' in control: md_text += '### Impact Statement\n' + control['impact_statement'] + '\n' if 'default_value' in control: md_text += '### Default Value\n' + control['default_value'] + '\n' if len(control_config_input): - configuration_text = '## Configuration\nThis control can be configured using the following parameters. Read CLI/UI documentation about how to change parameters.\n' + configuration_text = '## Configuration\n This control can be configured using the following parameters. Read CLI/UI documentation about how to change parameters.\n \n' for control_config_name in control_config_input: control_config = control_config_input[control_config_name] configuration_text += '### ' + control_config['name'] + '\n' config_name = control_config['path'].split('.')[-1] configuration_text += '[' + config_name + '](doc:configuration_parameter_%s)'%config_name.lower() + '\n' - configuration_text += control_config['description'] + '\n' + configuration_text += control_config['description'] + '\n \n' md_text += configuration_text md_text += '## Example\n' if 'example' in control: - md_text += '```\n' +control['example'] + '\n```' + '\n' + md_text += '```\n' + control['example'] + '\n```' + '\n' else: md_text += 'No example\n' return md_text def generate_slug(control): + """ + Generates a slug for a given control. + + Parameters + ---------- + control: The control object. + + Returns + ------- + str: The generated slug for the control. + + """ return control['controlID'].lower().replace(".", "-") def get_configuration_parameters_info(): + """ + Fetches and obtains the control's configuration parameters information. + + Returns + ------- + tuple: A tuple containing two dictionaries - config_parameters and default_config_inputs. + - config_parameters: A dictionary mapping configuration parameter names to their corresponding configuration objects. + - default_config_inputs: A dictionary containing default configuration inputs. + """ default_config_inputs = None with open('default-config-inputs.json','r') as f: default_config_inputs = json.load(f)['settings']['postureControlInputs'] @@ -276,7 +391,7 @@ def main(): readmeapi.authenticate(API_KEY) print('Authenticated') - # Validated structure + # Validate structure validate_readme_structure(readmeapi) print('Readme structure validated') @@ -388,9 +503,6 @@ def main(): exit(0) - - - def convert_control_id_to_doc_order(control_id: str) -> int: """get a control_id and returns it's expected order in docs. control_id is expected to either have "c-" or "cis-" prefix, otherwise raises an error. 
@@ -416,8 +528,6 @@ def convert_control_id_to_doc_order(control_id: str) -> int: raise Exception(f"control_id structure unknown {control_id}") - - def convert_dotted_section_to_int(subsection_id : str, subsection_digits : int = 2, n_subsections : int = 3) -> int: @@ -472,8 +582,7 @@ def convert_dotted_section_to_int(subsection_id : str, res = res + "0"*subsection_digits*(n_subsections - len(subsection_ids)) return int(res) - - + def find_inactive_controls_in_docs(list_docs : list, list_active: list) -> list: """returns a list of controls that doesn't exist in rego but exit in docs. @@ -492,7 +601,7 @@ def find_inactive_controls_in_docs(list_docs : list, list_active: list) -> list: """ return list(sorted(set(list_docs)- set(list_active))) - + def get_controls_doc_slugs(readmeapi: ReadmeApi) -> list: """returns a list of slugs exist under the "controls" category @@ -515,4 +624,4 @@ def get_controls_doc_slugs(readmeapi: ReadmeApi) -> list: return child_docs if __name__ == '__main__': - main() + main() \ No newline at end of file From e8f716d12f18312f0c064ef10044b7b49245947b Mon Sep 17 00:00:00 2001 From: Rohit Date: Fri, 24 Nov 2023 23:48:29 +0530 Subject: [PATCH 088/195] adding comments Signed-off-by: Rohit --- .github/sync.yml | 6 +++++- .github/workflows/create-release.yaml | 4 ++-- .github/workflows/sync.yml | 8 ++++++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.github/sync.yml b/.github/sync.yml index d6a54b3dd..f925e60ab 100644 --- a/.github/sync.yml +++ b/.github/sync.yml @@ -1,3 +1,7 @@ -kubescape/kubescape.io: +# This is a config file used by the `sync.yml` action under workflows folder +# To determine which files are to be synced and where. +# You can configure it to sync files across multiples repositories or branches too. 
+ +kubescape/kubescape.io: # Target repository - source: docs/controls dest: docs/docs/controls/ \ No newline at end of file diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml index 1031b4b13..ebdd8cd8c 100644 --- a/.github/workflows/create-release.yaml +++ b/.github/workflows/create-release.yaml @@ -166,5 +166,5 @@ jobs: README_API_KEY: ${{ secrets.README_API_KEY }} run: |- python ./scripts/upload-readme.py - - name: execute docs generator script - run: python ./scripts/mk-generator.py \ No newline at end of file + - name: execute docs generator script + run: python ./scripts/mk-generator.py # Script to generate controls library documentation \ No newline at end of file diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml index d73907ad5..a8a63ea3f 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -1,3 +1,7 @@ +# The action is used to sync documentation of controls library with `kubescape.io` website +# The action checks for any files that are out of sync +# And opens a pull request in the target repository with the changes(if any) + name: Sync Files on: @@ -5,7 +9,7 @@ on: branches: - master paths: - - 'docs/controls/**' + - 'docs/controls/**' # The action is triggered everytime there is a push to the defined path workflow_dispatch: jobs: @@ -18,7 +22,7 @@ jobs: uses: BetaHuhn/repo-file-sync-action@v1 with: GH_PAT: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - COMMIT_BODY: "Sync control library docs from `regolibrary` repo" + COMMIT_BODY: "Sync documentation of controls library from `regolibrary` repository" PR_BODY: Syncing the Control Library docs from `regolibrary` repository to update the `controls` documentation PR_LABELS: automerge COMMIT_PREFIX: "" \ No newline at end of file From fb535f6ea8ca046d864e5bb714b2726e7ce6d3b5 Mon Sep 17 00:00:00 2001 From: Matthias Bertschy Date: Wed, 24 Jan 2024 08:39:55 +0100 Subject: [PATCH 089/195] rebase and fix formatting Signed-off-by: Matthias Bertschy --- .github/sync.yml | 4 ++-- .github/workflows/create-release.yaml | 4 ++-- .github/workflows/sync.yml | 4 ++-- scripts/mk-generator.py | 2 +- scripts/upload-readme.py | 28 +++++++++++++-------------- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/sync.yml b/.github/sync.yml index f925e60ab..a50434370 100644 --- a/.github/sync.yml +++ b/.github/sync.yml @@ -1,7 +1,7 @@ # This is a config file used by the `sync.yml` action under workflows folder # To determine which files are to be synced and where. -# You can configure it to sync files across multiples repositories or branches too. +# You can configure it to sync files across multiples repositories or branches too. 
kubescape/kubescape.io: # Target repository - source: docs/controls - dest: docs/docs/controls/ \ No newline at end of file + dest: docs/docs/controls/ diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml index ebdd8cd8c..9de029f70 100644 --- a/.github/workflows/create-release.yaml +++ b/.github/workflows/create-release.yaml @@ -166,5 +166,5 @@ jobs: README_API_KEY: ${{ secrets.README_API_KEY }} run: |- python ./scripts/upload-readme.py - - name: execute docs generator script - run: python ./scripts/mk-generator.py # Script to generate controls library documentation \ No newline at end of file + - name: execute docs generator script + run: python ./scripts/mk-generator.py # Script to generate controls library documentation diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml index a8a63ea3f..88148b1d4 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -1,5 +1,5 @@ # The action is used to sync documentation of controls library with `kubescape.io` website -# The action checks for any files that are out of sync +# The action checks for any files that are out of sync # And opens a pull request in the target repository with the changes(if any) name: Sync Files @@ -25,4 +25,4 @@ jobs: COMMIT_BODY: "Sync documentation of controls library from `regolibrary` repository" PR_BODY: Syncing the Control Library docs from `regolibrary` repository to update the `controls` documentation PR_LABELS: automerge - COMMIT_PREFIX: "" \ No newline at end of file + COMMIT_PREFIX: "" diff --git a/scripts/mk-generator.py b/scripts/mk-generator.py index 264599bfb..7ed0a9ab9 100644 --- a/scripts/mk-generator.py +++ b/scripts/mk-generator.py @@ -414,4 +414,4 @@ def main(): # Run the main function if the script is run as a standalone program if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/scripts/upload-readme.py b/scripts/upload-readme.py index 3a8560845..289730b0b 100644 --- a/scripts/upload-readme.py +++ b/scripts/upload-readme.py @@ -141,7 +141,7 @@ def create_doc(self, slug: str, parent_id: str, order: any, title: str, body: st raise Exception('Failed to create doc: %s'%r.text) return r.json() - + def update_doc(self, doc_slug: str, order: any, title: str, body: str, category: str): """ Function to update a specific document in the Readme API using its slug. 
@@ -282,26 +282,26 @@ def create_md_for_control(control): # Start creating the markdown text md_text = '' if host_sensor: - md_text += '## Prerequisites\n *Run Kubescape with host sensor (see [here](https://hub.armo.cloud/docs/host-sensor))*\n \n' + md_text += '## Prerequisites\n*Run Kubescape with host sensor (see [here](https://hub.armo.cloud/docs/host-sensor))*\n' if cloud_control: - md_text += '## Prerequisites\n *Integrate with cloud provider (see [here](https://hub.armosec.io/docs/kubescape-integration-with-cloud-providers))*\n \n' + md_text += '## Prerequisites\n*Integrate with cloud provider (see [here](https://hub.armosec.io/docs/kubescape-integration-with-cloud-providers))*\n' md_text += '## Framework\n' - md_text += ', '.join(get_frameworks_for_control(control)) + '\n \n' + md_text += ', '.join(get_frameworks_for_control(control)) + '\n' md_text += '## Severity\n' # severity map: https://github.com/kubescape/opa-utils/blob/master/reporthandling/apis/severity.go#L34 severity_map = {1:'Low',2:'Low',3:'Low',4:'Medium',5:'Medium',6:'Medium',7:'High',8:'High',9:'Critical',10:'Critical'} - md_text += '%s\n' % severity_map[int(control['baseScore'])] + '\n' + md_text += '%s\n' % severity_map[int(control['baseScore'])] md_text += '## Description of the the issue\n' description = control['long_description'] if 'long_description' in control else control['description'] if len(control_config_input): description += 'Note, this control is configurable. See below the details.' - md_text += description + '\n \n' + md_text += description + '\n' md_text += '## Related resources\n' - md_text += ', '.join(sorted(list(related_resources))) + '\n \n' + md_text += ', '.join(sorted(list(related_resources))) + '\n' md_text += '## What does this control test\n' test = control['test'] if 'test' in control else control['description'] - md_text += test + '\n \n' + md_text += test + '\n' if 'manual_test' in control: md_text += '## How to check it manually\n' @@ -309,20 +309,20 @@ def create_md_for_control(control): md_text += manual_test + '\n' md_text += '## Remediation\n' - md_text += control['remediation'] + '\n \n' + md_text += control['remediation'] + '\n' if 'impact_statement' in control: md_text += '### Impact Statement\n' + control['impact_statement'] + '\n' if 'default_value' in control: md_text += '### Default Value\n' + control['default_value'] + '\n' if len(control_config_input): - configuration_text = '## Configuration\n This control can be configured using the following parameters. Read CLI/UI documentation about how to change parameters.\n \n' + configuration_text = '## Configuration\nThis control can be configured using the following parameters. 
Read CLI/UI documentation about how to change parameters.\n' for control_config_name in control_config_input: control_config = control_config_input[control_config_name] configuration_text += '### ' + control_config['name'] + '\n' config_name = control_config['path'].split('.')[-1] configuration_text += '[' + config_name + '](doc:configuration_parameter_%s)'%config_name.lower() + '\n' - configuration_text += control_config['description'] + '\n \n' + configuration_text += control_config['description'] + '\n' md_text += configuration_text md_text += '## Example\n' @@ -582,7 +582,7 @@ def convert_dotted_section_to_int(subsection_id : str, res = res + "0"*subsection_digits*(n_subsections - len(subsection_ids)) return int(res) - + def find_inactive_controls_in_docs(list_docs : list, list_active: list) -> list: """returns a list of controls that doesn't exist in rego but exit in docs. @@ -601,7 +601,7 @@ def find_inactive_controls_in_docs(list_docs : list, list_active: list) -> list: """ return list(sorted(set(list_docs)- set(list_active))) - + def get_controls_doc_slugs(readmeapi: ReadmeApi) -> list: """returns a list of slugs exist under the "controls" category @@ -624,4 +624,4 @@ def get_controls_doc_slugs(readmeapi: ReadmeApi) -> list: return child_docs if __name__ == '__main__': - main() \ No newline at end of file + main() From fe14341544328f28de4b5fcedc67ee4d8a73d7e9 Mon Sep 17 00:00:00 2001 From: Ben Date: Thu, 25 Jan 2024 12:34:51 +0200 Subject: [PATCH 090/195] Adding control C-0265 Signed-off-by: Ben --- controls/C-0265-authenticateduserhasrbac.json | 24 ++++++ frameworks/allcontrols.json | 8 +- frameworks/clusterscan.json | 6 ++ frameworks/security.json | 12 ++- .../raw.rego | 65 ++++++++++++++++ .../rule.metadata.json | 27 +++++++ .../test/fail/expected.json | 74 +++++++++++++++++++ .../test/fail/input/clusterrole.yaml | 18 +++++ .../test/fail/input/clusterrolebinding.yaml | 12 +++ .../test/success/expected.json | 1 + .../test/success/input/rolebinding.yaml | 26 +++++++ 11 files changed, 269 insertions(+), 4 deletions(-) create mode 100644 controls/C-0265-authenticateduserhasrbac.json create mode 100644 rules/system-authenticated-allowed-to-take-over-cluster/raw.rego create mode 100644 rules/system-authenticated-allowed-to-take-over-cluster/rule.metadata.json create mode 100644 rules/system-authenticated-allowed-to-take-over-cluster/test/fail/expected.json create mode 100644 rules/system-authenticated-allowed-to-take-over-cluster/test/fail/input/clusterrole.yaml create mode 100644 rules/system-authenticated-allowed-to-take-over-cluster/test/fail/input/clusterrolebinding.yaml create mode 100644 rules/system-authenticated-allowed-to-take-over-cluster/test/success/expected.json create mode 100644 rules/system-authenticated-allowed-to-take-over-cluster/test/success/input/rolebinding.yaml diff --git a/controls/C-0265-authenticateduserhasrbac.json b/controls/C-0265-authenticateduserhasrbac.json new file mode 100644 index 000000000..1d023639e --- /dev/null +++ b/controls/C-0265-authenticateduserhasrbac.json @@ -0,0 +1,24 @@ +{ + "controlID": "C-0265", + "name": "system:authenticated user has elevated roles", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
This control ensures that system:authenticated users do not have cluster risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", + "attributes": { + }, + "rulesNames": [ + "system-authenticated-allowed-to-take-over-cluster" + ], + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain" + } + }, + "scanningScope": { + "matches": [ + "cluster" + ] + } +} \ No newline at end of file diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index f8f28441c..24461d865 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -372,11 +372,17 @@ "name": "CVE-2022-47633-kyverno-signature-bypass" } }, - { + { "controlID": "C-0262", "patch": { "name": "Anonymous access enabled" } + }, + { + "controlID": "C-0265", + "patch": { + "name": "Authenticated user has sensitive permissions" + } } ] } diff --git a/frameworks/clusterscan.json b/frameworks/clusterscan.json index 2b805b803..1d19a251f 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -38,6 +38,12 @@ "name": "Anonymous access enabled" } }, + { + "controlID": "C-0265", + "patch": { + "name": "Authenticated user has sensitive permissions" + } + }, { "controlID": "C-0015", "patch": { diff --git a/frameworks/security.json b/frameworks/security.json index 23a6b2765..cf5306156 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -26,7 +26,7 @@ "name": "Immutable container filesystem" } }, - { + { "controlID": "C-0256", "patch": { "name": "Exposure to Internet" @@ -62,7 +62,7 @@ "name": "ServiceAccount token mounted" } }, - { + { "controlID": "C-0255", "patch": { "name": "Workload with secret access" @@ -104,11 +104,17 @@ "name": "Apply Security Context to Your Pods and Containers" } }, - { + { "controlID": "C-0262", "patch": { "name": "Anonymous access enabled" } + }, + { + "controlID": "C-0265", + "patch": { + "name": "Authenticated user has sensitive permissions" + } } ] } diff --git a/rules/system-authenticated-allowed-to-take-over-cluster/raw.rego b/rules/system-authenticated-allowed-to-take-over-cluster/raw.rego new file mode 100644 index 000000000..251abcbe2 --- /dev/null +++ b/rules/system-authenticated-allowed-to-take-over-cluster/raw.rego @@ -0,0 +1,65 @@ +package armo_builtins + +import future.keywords.in + +deny[msga] { + subjectVector := input[_] + + rolebinding := subjectVector.relatedObjects[j] + endswith(rolebinding.kind, "Binding") + + + subject := rolebinding.subjects[k] + # Check if the subject is gourp + subject.kind == "Group" + # Check if the subject is system:authenticated + subject.name == "system:authenticated" + + + # Find the bound roles + role := subjectVector.relatedObjects[i] + endswith(role.kind, "Role") + + # Check if the role and rolebinding bound + is_same_role_and_binding(role, rolebinding) + + + # Check if the role has access to workloads, exec, attach, portforward + rule := role.rules[p] + rule.resources[l] in ["*","pods", "pods/exec", "pods/attach", "pods/portforward","deployments","statefulset","daemonset","jobs","cronjobs","nodes","secrets"] + + finalpath := array.concat([""], [ + sprintf("relatedObjects[%d].subjects[%d]", [j, k]), + sprintf("relatedObjects[%d].roleRef.name", [i]), + ]) + + msga := { + "alertMessage": "system:authenticated has sensitive roles", + "alertScore": 
5, + "reviewPaths": finalpath, + "failedPaths": finalpath, + "fixPaths": [], + "packagename": "armo_builtins", + "alertObject": { + "k8sApiObjects": [], + "externalObjects" : subjectVector + }, + } +} + +is_same_role_and_binding(role, rolebinding) { + rolebinding.kind == "RoleBinding" + role.kind == "Role" + rolebinding.metadata.namespace == role.metadata.namespace + rolebinding.roleRef.name == role.metadata.name + rolebinding.roleRef.kind == role.kind + startswith(role.apiVersion, rolebinding.roleRef.apiGroup) +} + +is_same_role_and_binding(role, rolebinding) { + rolebinding.kind == "ClusterRoleBinding" + role.kind == "ClusterRole" + rolebinding.roleRef.name == role.metadata.name + rolebinding.roleRef.kind == role.kind + startswith(role.apiVersion, rolebinding.roleRef.apiGroup) +} \ No newline at end of file diff --git a/rules/system-authenticated-allowed-to-take-over-cluster/rule.metadata.json b/rules/system-authenticated-allowed-to-take-over-cluster/rule.metadata.json new file mode 100644 index 000000000..37a004adc --- /dev/null +++ b/rules/system-authenticated-allowed-to-take-over-cluster/rule.metadata.json @@ -0,0 +1,27 @@ +{ + "name": "system-authenticated-allowed-to-take-over-cluster", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", + "ruleQuery": "armo_builtins" +} diff --git a/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/expected.json b/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/expected.json new file mode 100644 index 000000000..28ad87b4f --- /dev/null +++ b/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/expected.json @@ -0,0 +1,74 @@ +[ + { + "alertMessage": "system:authenticated has sensitive roles", + "alertObject": { + "externalObjects": { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Group", + "name": "system:authenticated", + "relatedObjects": [ + { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRoleBinding", + "metadata": { + "name": "system:viewer" + }, + "roleRef": { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "name": "system:viewer" + }, + "subjects": [ + { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Group", + "name": "system:authenticated" + } + ] + }, + { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": { + "name": "system:viewer" + }, + "rules": [ + { + "apiGroups": [ + "" + ], + "resources": [ + "nodes", + "nodes/*", + "namespaces", + "namespaces/*", + "pods", + "pods/*" + ], + "verbs": [ + "get", + "list", + "watch" + ] + } + ] + } + ] + }, + "k8sApiObjects": [] + }, + "alertScore": 5, + "failedPaths": [ + "", + "relatedObjects[0].subjects[0]", + "relatedObjects[1].roleRef.name" + ], + "fixPaths": [], + "packagename": "armo_builtins", + "reviewPaths": [ + "", + "relatedObjects[0].subjects[0]", + "relatedObjects[1].roleRef.name" + ] + } +] \ No newline at end of file diff --git a/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/input/clusterrole.yaml 
b/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/input/clusterrole.yaml new file mode 100644 index 000000000..a374bc4be --- /dev/null +++ b/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/input/clusterrole.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:viewer +rules: +- apiGroups: + - "" + resources: + - nodes + - nodes/* + - namespaces + - namespaces/* + - pods + - pods/* + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/input/clusterrolebinding.yaml b/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/input/clusterrolebinding.yaml new file mode 100644 index 000000000..1c989e816 --- /dev/null +++ b/rules/system-authenticated-allowed-to-take-over-cluster/test/fail/input/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:viewer +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated \ No newline at end of file diff --git a/rules/system-authenticated-allowed-to-take-over-cluster/test/success/expected.json b/rules/system-authenticated-allowed-to-take-over-cluster/test/success/expected.json new file mode 100644 index 000000000..fe51488c7 --- /dev/null +++ b/rules/system-authenticated-allowed-to-take-over-cluster/test/success/expected.json @@ -0,0 +1 @@ +[] diff --git a/rules/system-authenticated-allowed-to-take-over-cluster/test/success/input/rolebinding.yaml b/rules/system-authenticated-allowed-to-take-over-cluster/test/success/input/rolebinding.yaml new file mode 100644 index 000000000..3909d713d --- /dev/null +++ b/rules/system-authenticated-allowed-to-take-over-cluster/test/success/input/rolebinding.yaml @@ -0,0 +1,26 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:viewer +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:viewer +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch \ No newline at end of file From b956d20a79e8857c82766e81aa6d305e5fc92c5a Mon Sep 17 00:00:00 2001 From: Avraham Shalev <8184528+avrahams@users.noreply.github.com> Date: Wed, 31 Jan 2024 14:43:53 +0200 Subject: [PATCH 091/195] + controls to security framework Signed-off-by: Avraham Shalev <8184528+avrahams@users.noreply.github.com> --- frameworks/security.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/frameworks/security.json b/frameworks/security.json index cf5306156..42010a264 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -115,6 +115,18 @@ "patch": { "name": "Authenticated user has sensitive permissions" } + }, + { + "controlID": "C-0057", + "patch": { + "name": "Privileged container" + } + }, + { + "controlID": "C-0038", + "patch": { + "name": "Host PID/IPC privileges" + } } ] } From 0b494280b20e4b18f5496afa0ee0a345ba1cfbc8 Mon Sep 17 00:00:00 2001 From: Ben Date: Sun, 4 Feb 2024 22:17:26 +0200 Subject: [PATCH 092/195] Support HTTPRoute in Gateway API Signed-off-by: Ben --- rules/exposure-to-internet/raw.rego | 64 ++++++++++++- 
rules/exposure-to-internet/rule.metadata.json | 11 +++ .../test/failed_with_httproute/expected.json | 21 +++++ .../input/deployment.yaml | 93 +++++++++++++++++++ .../input/httproute.yaml | 51 ++++++++++ .../failed_with_httproute/input/service.yaml | 34 +++++++ testrunner/opaprocessor/processorutils.go | 1 + 7 files changed, 271 insertions(+), 4 deletions(-) create mode 100644 rules/exposure-to-internet/test/failed_with_httproute/expected.json create mode 100644 rules/exposure-to-internet/test/failed_with_httproute/input/deployment.yaml create mode 100644 rules/exposure-to-internet/test/failed_with_httproute/input/httproute.yaml create mode 100644 rules/exposure-to-internet/test/failed_with_httproute/input/service.yaml diff --git a/rules/exposure-to-internet/raw.rego b/rules/exposure-to-internet/raw.rego index 942e7e4e6..78cb19b74 100644 --- a/rules/exposure-to-internet/raw.rego +++ b/rules/exposure-to-internet/raw.rego @@ -5,7 +5,7 @@ deny[msga] { service := input[_] service.kind == "Service" is_exposed_service(service) - + wl := input[_] spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Pod", "Job", "CronJob"} spec_template_spec_patterns[wl.kind] @@ -32,7 +32,7 @@ deny[msga] { deny[msga] { ingress := input[_] ingress.kind == "Ingress" - + svc := input[_] svc.kind == "Service" @@ -49,7 +49,7 @@ deny[msga] { wl_connected_to_service(wl, svc) result := svc_connected_to_ingress(svc, ingress) - + msga := { "alertMessage": sprintf("workload '%v' is exposed through ingress '%v'", [wl.metadata.name, ingress.metadata.name]), "packagename": "armo_builtins", @@ -70,7 +70,51 @@ deny[msga] { } ] } -} +} + +deny[msga] { + httproute := input[_] + httproute.kind == "HTTPRoute" + + svc := input[_] + svc.kind == "Service" + + # Make sure that they belong to the same namespace + svc.metadata.namespace == httproute.metadata.namespace + + # avoid duplicate alerts + # if service is already exposed through NodePort or LoadBalancer workload will fail on that + not is_exposed_service(svc) + + wl := input[_] + wl.metadata.namespace == svc.metadata.namespace + spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Pod", "Job", "CronJob"} + spec_template_spec_patterns[wl.kind] + wl_connected_to_service(wl, svc) + + result := svc_connected_to_httproute(svc, httproute) + + msga := { + "alertMessage": sprintf("workload '%v' is exposed through httproute '%v'", [wl.metadata.name, httproute.metadata.name]), + "packagename": "armo_builtins", + "failedPaths": [], + "fixPaths": [], + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [wl] + }, + "relatedObjects": [ + { + "object": httproute, + "reviewPaths": result, + "failedPaths": result, + }, + { + "object": svc, + } + ] + } +} # ==================================================================================== @@ -90,6 +134,10 @@ wl_connected_to_service(wl, svc) { wl.spec.selector.matchLabels == svc.spec.selector } +wl_connected_to_service(wl, svc) { + count({x | svc.spec.selector[x] == wl.spec.template.metadata.labels[x]}) == count(svc.spec.selector) +} + # check if service is connected to ingress svc_connected_to_ingress(svc, ingress) = result { rule := ingress.spec.rules[i] @@ -98,3 +146,11 @@ svc_connected_to_ingress(svc, ingress) = result { result := [sprintf("spec.rules[%d].http.paths[%d].backend.service.name", [i,j])] } +svc_connected_to_httproute(svc, httproute) = result { + rule := httproute.spec.rules[i] + ref := rule.backendRefs[j] + ref.kind == "Service" + svc.metadata.name == 
ref.name + result := [sprintf("spec.rules[%d].backendRefs[%d].name", [i,j])] +} + diff --git a/rules/exposure-to-internet/rule.metadata.json b/rules/exposure-to-internet/rule.metadata.json index d1357ee94..ae44ef311 100644 --- a/rules/exposure-to-internet/rule.metadata.json +++ b/rules/exposure-to-internet/rule.metadata.json @@ -52,6 +52,17 @@ "resources": [ "Ingress" ] + }, + { + "apiGroups": [ + "gateway.networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "HTTPRoute" + ] } ], "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", diff --git a/rules/exposure-to-internet/test/failed_with_httproute/expected.json b/rules/exposure-to-internet/test/failed_with_httproute/expected.json new file mode 100644 index 000000000..84bc8246a --- /dev/null +++ b/rules/exposure-to-internet/test/failed_with_httproute/expected.json @@ -0,0 +1,21 @@ +[ + { + "alertMessage": "workload 'httpbin' is exposed through httproute 'httpbin'", + "failedPaths": [], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "httpbin" + } + } + ] + } + } + ] \ No newline at end of file diff --git a/rules/exposure-to-internet/test/failed_with_httproute/input/deployment.yaml b/rules/exposure-to-internet/test/failed_with_httproute/input/deployment.yaml new file mode 100644 index 000000000..2b40cae26 --- /dev/null +++ b/rules/exposure-to-internet/test/failed_with_httproute/input/deployment.yaml @@ -0,0 +1,93 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + creationTimestamp: "2024-02-04T19:05:12Z" + generation: 1 + name: httpbin + namespace: httpbin + resourceVersion: "870" + uid: 7462bb4c-b5a2-413e-80ee-c1baaf34aade +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: httpbin + version: v1 + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app: httpbin + version: v1 + spec: + containers: + - args: + - -port + - "8080" + - -max-duration + - 600s + command: + - go-httpbin + image: docker.io/mccutchen/go-httpbin:v2.6.0 + imagePullPolicy: IfNotPresent + name: httpbin + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + - command: + - tail + - -f + - /dev/null + image: curlimages/curl:7.83.1 + imagePullPolicy: IfNotPresent + name: curl + resources: + limits: + cpu: 200m + requests: + cpu: 100m + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + - image: gcr.io/solo-public/docs/hey:0.1.4 + imagePullPolicy: IfNotPresent + name: hey + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: httpbin + serviceAccountName: httpbin + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2024-02-04T19:05:32Z" + lastUpdateTime: "2024-02-04T19:05:32Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2024-02-04T19:05:12Z" + lastUpdateTime: "2024-02-04T19:05:32Z" + message: ReplicaSet "httpbin-f46cc8b9b" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/rules/exposure-to-internet/test/failed_with_httproute/input/httproute.yaml b/rules/exposure-to-internet/test/failed_with_httproute/input/httproute.yaml new file mode 100644 index 000000000..44b941b78 --- /dev/null +++ b/rules/exposure-to-internet/test/failed_with_httproute/input/httproute.yaml @@ -0,0 +1,51 @@ +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + creationTimestamp: "2024-02-04T19:06:03Z" + generation: 1 + labels: + example: httpbin-route + name: httpbin + namespace: httpbin + resourceVersion: "914" + uid: fd820080-801d-4fa7-934a-e23abe8bf746 +spec: + hostnames: + - www.example.com + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: http + namespace: gloo-system + rules: + - backendRefs: + - group: "" + kind: Service + name: httpbin + port: 8000 + weight: 1 + matches: + - path: + type: PathPrefix + value: / +status: + parents: + - conditions: + - lastTransitionTime: "2024-02-04T19:06:03Z" + message: "" + observedGeneration: 1 + reason: Accepted + status: "True" + type: Accepted + - lastTransitionTime: "2024-02-04T19:06:03Z" + message: "" + observedGeneration: 1 + reason: ResolvedRefs + status: "True" + type: ResolvedRefs + controllerName: solo.io/gloo-gateway + parentRef: + group: gateway.networking.k8s.io + kind: Gateway + name: http + namespace: gloo-system diff --git a/rules/exposure-to-internet/test/failed_with_httproute/input/service.yaml b/rules/exposure-to-internet/test/failed_with_httproute/input/service.yaml new file mode 100644 index 000000000..40e721d26 --- /dev/null +++ b/rules/exposure-to-internet/test/failed_with_httproute/input/service.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: "2024-02-04T19:05:12Z" + labels: + app: httpbin + service: httpbin + name: httpbin + namespace: httpbin + resourceVersion: "811" + uid: c391feb7-54e5-41b2-869b-33166869f1b7 +spec: + clusterIP: 10.96.162.234 + clusterIPs: + - 10.96.162.234 + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: http + port: 8000 + protocol: TCP + targetPort: 8080 + - name: tcp + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app: httpbin + sessionAffinity: None + type: ClusterIP +status: + loadBalancer: {} diff --git a/testrunner/opaprocessor/processorutils.go b/testrunner/opaprocessor/processorutils.go index eddb8beb6..c242222bc 100644 --- a/testrunner/opaprocessor/processorutils.go +++ b/testrunner/opaprocessor/processorutils.go @@ -161,6 +161,7 @@ func AssertResponses(t *testing.T, responses []reporthandling.RuleResponse, expe return err } + //fmt.Println("actual:", string(actual)) require.JSONEq(t, string(expected), string(actual)) return nil } From bd9383d8dd407369f67af01763aeba5d8ededd8a Mon Sep 17 00:00:00 2001 From: Ben Date: Mon, 5 Feb 2024 21:54:02 +0200 Subject: [PATCH 093/195] Separating the new functionality to a new control Signed-off-by: Ben --- .../C-0266-exposuretointernet-gateway.json | 33 ++++++++ .../raw.rego | 77 +++++++++++++++++++ .../rule.metadata.json | 60 +++++++++++++++ .../test/failed_with_httproute/expected.json | 0 .../input/deployment.yaml | 0 
.../input/httproute.yaml | 0 .../failed_with_httproute/input/service.yaml | 0 rules/exposure-to-internet/raw.rego | 51 ------------ rules/exposure-to-internet/rule.metadata.json | 11 --- 9 files changed, 170 insertions(+), 62 deletions(-) create mode 100644 controls/C-0266-exposuretointernet-gateway.json create mode 100644 rules/exposure-to-internet-via-gateway-api/raw.rego create mode 100644 rules/exposure-to-internet-via-gateway-api/rule.metadata.json rename rules/{exposure-to-internet => exposure-to-internet-via-gateway-api}/test/failed_with_httproute/expected.json (100%) rename rules/{exposure-to-internet => exposure-to-internet-via-gateway-api}/test/failed_with_httproute/input/deployment.yaml (100%) rename rules/{exposure-to-internet => exposure-to-internet-via-gateway-api}/test/failed_with_httproute/input/httproute.yaml (100%) rename rules/{exposure-to-internet => exposure-to-internet-via-gateway-api}/test/failed_with_httproute/input/service.yaml (100%) diff --git a/controls/C-0266-exposuretointernet-gateway.json b/controls/C-0266-exposuretointernet-gateway.json new file mode 100644 index 000000000..3eb0665fe --- /dev/null +++ b/controls/C-0266-exposuretointernet-gateway.json @@ -0,0 +1,33 @@ +{ + "name": "Exposure to internet via Gateway API", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + } + ] + }, + "description": "This control detect workloads that are exposed on Internet through a Gateway API (HTTPRoute,TCPRoute, UDPRoute). It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "rulesNames": ["exposure-to-internet-via-gateway-api"], + "test": "Checks if workloads are exposed through the use of Gateway API (HTTPRoute, TCPRoute, UDPRoute).", + "controlID": "C-0266", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } +} diff --git a/rules/exposure-to-internet-via-gateway-api/raw.rego b/rules/exposure-to-internet-via-gateway-api/raw.rego new file mode 100644 index 000000000..a6173c363 --- /dev/null +++ b/rules/exposure-to-internet-via-gateway-api/raw.rego @@ -0,0 +1,77 @@ +package armo_builtins + + +deny[msga] { + httproute := input[_] + httproute.kind == "HTTPRoute" + + svc := input[_] + svc.kind == "Service" + + # Make sure that they belong to the same namespace + svc.metadata.namespace == httproute.metadata.namespace + + # avoid duplicate alerts + # if service is already exposed through NodePort or LoadBalancer workload will fail on that + not is_exposed_service(svc) + + wl := input[_] + wl.metadata.namespace == svc.metadata.namespace + spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Pod", "Job", "CronJob"} + spec_template_spec_patterns[wl.kind] + wl_connected_to_service(wl, svc) + + result := svc_connected_to_httproute(svc, httproute) + + msga := { + "alertMessage": sprintf("workload '%v' is exposed through httproute '%v'", [wl.metadata.name, httproute.metadata.name]), + "packagename": "armo_builtins", + "failedPaths": [], + "fixPaths": [], + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [wl] + }, + "relatedObjects": [ + { + "object": httproute, + "reviewPaths": result, + "failedPaths": result, + }, + { + "object": svc, + } + ] + } +} + +# 
==================================================================================== + +is_exposed_service(svc) { + svc.spec.type == "NodePort" +} + +is_exposed_service(svc) { + svc.spec.type == "LoadBalancer" +} + +wl_connected_to_service(wl, svc) { + count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector) +} + +wl_connected_to_service(wl, svc) { + wl.spec.selector.matchLabels == svc.spec.selector +} + +wl_connected_to_service(wl, svc) { + count({x | svc.spec.selector[x] == wl.spec.template.metadata.labels[x]}) == count(svc.spec.selector) +} + +svc_connected_to_httproute(svc, httproute) = result { + rule := httproute.spec.rules[i] + ref := rule.backendRefs[j] + ref.kind == "Service" + svc.metadata.name == ref.name + result := [sprintf("spec.rules[%d].backendRefs[%d].name", [i,j])] +} + diff --git a/rules/exposure-to-internet-via-gateway-api/rule.metadata.json b/rules/exposure-to-internet-via-gateway-api/rule.metadata.json new file mode 100644 index 000000000..b2d1a4818 --- /dev/null +++ b/rules/exposure-to-internet-via-gateway-api/rule.metadata.json @@ -0,0 +1,60 @@ +{ + "name": "exposure-to-internet-via-gateway-api", + "attributes": { + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "gateway.networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "HTTPRoute" + ] + } + ], + "description": "fails in case the running workload has binded Service and Gateway that are exposing it on Internet.", + "remediation": "", + "ruleQuery": "armo_builtins" +} diff --git a/rules/exposure-to-internet/test/failed_with_httproute/expected.json b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/expected.json similarity index 100% rename from rules/exposure-to-internet/test/failed_with_httproute/expected.json rename to rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/expected.json diff --git a/rules/exposure-to-internet/test/failed_with_httproute/input/deployment.yaml b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/input/deployment.yaml similarity index 100% rename from rules/exposure-to-internet/test/failed_with_httproute/input/deployment.yaml rename to rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/input/deployment.yaml diff --git a/rules/exposure-to-internet/test/failed_with_httproute/input/httproute.yaml b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/input/httproute.yaml similarity index 100% rename from rules/exposure-to-internet/test/failed_with_httproute/input/httproute.yaml rename to rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/input/httproute.yaml diff --git a/rules/exposure-to-internet/test/failed_with_httproute/input/service.yaml b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/input/service.yaml similarity index 100% rename from rules/exposure-to-internet/test/failed_with_httproute/input/service.yaml rename to rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/input/service.yaml diff --git a/rules/exposure-to-internet/raw.rego b/rules/exposure-to-internet/raw.rego index 
78cb19b74..d4e849926 100644 --- a/rules/exposure-to-internet/raw.rego +++ b/rules/exposure-to-internet/raw.rego @@ -72,50 +72,6 @@ deny[msga] { } } -deny[msga] { - httproute := input[_] - httproute.kind == "HTTPRoute" - - svc := input[_] - svc.kind == "Service" - - # Make sure that they belong to the same namespace - svc.metadata.namespace == httproute.metadata.namespace - - # avoid duplicate alerts - # if service is already exposed through NodePort or LoadBalancer workload will fail on that - not is_exposed_service(svc) - - wl := input[_] - wl.metadata.namespace == svc.metadata.namespace - spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Pod", "Job", "CronJob"} - spec_template_spec_patterns[wl.kind] - wl_connected_to_service(wl, svc) - - result := svc_connected_to_httproute(svc, httproute) - - msga := { - "alertMessage": sprintf("workload '%v' is exposed through httproute '%v'", [wl.metadata.name, httproute.metadata.name]), - "packagename": "armo_builtins", - "failedPaths": [], - "fixPaths": [], - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [wl] - }, - "relatedObjects": [ - { - "object": httproute, - "reviewPaths": result, - "failedPaths": result, - }, - { - "object": svc, - } - ] - } -} - # ==================================================================================== is_exposed_service(svc) { @@ -146,11 +102,4 @@ svc_connected_to_ingress(svc, ingress) = result { result := [sprintf("spec.rules[%d].http.paths[%d].backend.service.name", [i,j])] } -svc_connected_to_httproute(svc, httproute) = result { - rule := httproute.spec.rules[i] - ref := rule.backendRefs[j] - ref.kind == "Service" - svc.metadata.name == ref.name - result := [sprintf("spec.rules[%d].backendRefs[%d].name", [i,j])] -} diff --git a/rules/exposure-to-internet/rule.metadata.json b/rules/exposure-to-internet/rule.metadata.json index ae44ef311..d1357ee94 100644 --- a/rules/exposure-to-internet/rule.metadata.json +++ b/rules/exposure-to-internet/rule.metadata.json @@ -52,17 +52,6 @@ "resources": [ "Ingress" ] - }, - { - "apiGroups": [ - "gateway.networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "HTTPRoute" - ] } ], "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", From 3999548cf6ba2dcd31645398fcbbde3358af5617 Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Wed, 7 Feb 2024 15:17:23 +0200 Subject: [PATCH 094/195] Fixed reviewPaths Signed-off-by: David Wertenteil --- rules/non-root-containers/raw.rego | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rules/non-root-containers/raw.rego b/rules/non-root-containers/raw.rego index f682dd83b..618d4b41f 100644 --- a/rules/non-root-containers/raw.rego +++ b/rules/non-root-containers/raw.rego @@ -19,7 +19,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in pod: %v may run as root", [container.name, pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": "", + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { @@ -46,7 +46,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in %v: %v may run as root", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": "", + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { @@ -73,7 +73,7 @@ deny[msga] { "alertMessage": sprintf("container: %v in %v: %v may run as root", [container.name, wl.kind, wl.metadata.name]), 
"packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": "", + "reviewPaths": [], "failedPaths": [], "fixPaths": fixPaths, "alertObject": { From 8bc2869af642f068e3beefa4e5690a601be28bc0 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 11 Feb 2024 11:56:03 +0200 Subject: [PATCH 095/195] change C-0035 name to `Administrative Roles` Signed-off-by: YiscahLevySilas1 --- controls/C-0035-clusteradminbinding.json | 2 +- frameworks/__YAMLscan.json | 2 +- frameworks/allcontrols.json | 2 +- frameworks/armobest.json | 2 +- frameworks/clusterscan.json | 2 +- frameworks/mitre.json | 2 +- frameworks/nsaframework.json | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/controls/C-0035-clusteradminbinding.json b/controls/C-0035-clusteradminbinding.json index ef6cf7da4..e2f1e987e 100644 --- a/controls/C-0035-clusteradminbinding.json +++ b/controls/C-0035-clusteradminbinding.json @@ -1,5 +1,5 @@ { - "name": "Cluster-admin binding", + "name": "Administrative Roles", "attributes": { "microsoftMitreColumns": [ "Privilege escalation" diff --git a/frameworks/__YAMLscan.json b/frameworks/__YAMLscan.json index 2e05517ce..a88bca10c 100644 --- a/frameworks/__YAMLscan.json +++ b/frameworks/__YAMLscan.json @@ -29,7 +29,7 @@ "Delete Kubernetes events", "Access tiller endpoint", "Automatic mapping of service account", - "Cluster-admin binding", + "Administrative Roles", "Validate admission controller (validating)", "CoreDNS poisoning", "Host PID/IPC privileges", diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index 24461d865..2ebf4eed0 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -129,7 +129,7 @@ { "controlID": "C-0035", "patch": { - "name": "Cluster-admin binding" + "name": "Administrative Roles" } }, { diff --git a/frameworks/armobest.json b/frameworks/armobest.json index 158dfba39..bb2196030 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -75,7 +75,7 @@ { "controlID": "C-0035", "patch": { - "name": "Cluster-admin binding" + "name": "Administrative Roles" } }, { diff --git a/frameworks/clusterscan.json b/frameworks/clusterscan.json index 1d19a251f..2a4501b1b 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -83,7 +83,7 @@ { "controlID": "C-0035", "patch": { - "name": "Cluster-admin binding" + "name": "Administrative Roles" } }, { diff --git a/frameworks/mitre.json b/frameworks/mitre.json index 97f18650f..76ff2c248 100644 --- a/frameworks/mitre.json +++ b/frameworks/mitre.json @@ -69,7 +69,7 @@ { "controlID": "C-0035", "patch": { - "name": "Cluster-admin binding" + "name": "Administrative Roles" } }, { diff --git a/frameworks/nsaframework.json b/frameworks/nsaframework.json index 68c17ef9c..c5b7d98b3 100644 --- a/frameworks/nsaframework.json +++ b/frameworks/nsaframework.json @@ -69,7 +69,7 @@ { "controlID": "C-0035", "patch": { - "name": "Cluster-admin binding" + "name": "Administrative Roles" } }, { From 712158edcc93eea707aabfbe10b774be6f36ff86 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 11 Feb 2024 17:39:57 +0200 Subject: [PATCH 096/195] add security issues controls to security fw Signed-off-by: YiscahLevySilas1 --- frameworks/security.json | 126 +++++++++++++++++++++++++++++---------- 1 file changed, 96 insertions(+), 30 deletions(-) diff --git a/frameworks/security.json b/frameworks/security.json index 42010a264..be65ebd70 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -2,7 +2,7 @@ "name": "security", "description": "Controls that are 
used to assess security threats.", "attributes": { - "armoBuiltin": true + "armoBuiltin": true }, "typeTags": [ "security" @@ -15,57 +15,57 @@ }, "activeControls": [ { - "controlID": "C-0009", + "controlID": "C-0005", "patch": { - "name": "Resource limits" + "name": "API server insecure port is enabled" } }, { - "controlID": "C-0017", + "controlID": "C-0009", "patch": { - "name": "Immutable container filesystem" + "name": "Resource limits" } }, - { - "controlID": "C-0256", + { + "controlID": "C-0012", "patch": { - "name": "Exposure to Internet" + "name": "Applications credentials in configuration files" } }, - { - "controlID": "C-0259", + { + "controlID": "C-0013", "patch": { - "name": "Workload with credential access" + "name": "Non-root containers" } }, { - "controlID": "C-0258", + "controlID": "C-0016", "patch": { - "name": "Workload with configMap access" + "name": "Allow privilege escalation" } }, { - "controlID": "C-0257", + "controlID": "C-0017", "patch": { - "name": "Workload with PVC access" + "name": "Immutable container filesystem" } }, { - "controlID": "C-0260", + "controlID": "C-0034", "patch": { - "name": "Missing network policy" + "name": "Automatic mapping of service account" } }, { - "controlID": "C-0261", + "controlID": "C-0035", "patch": { - "name": "ServiceAccount token mounted" + "name": "Administrative Roles" } }, { - "controlID": "C-0255", + "controlID": "C-0038", "patch": { - "name": "Workload with secret access" + "name": "Host PID/IPC privileges" } }, { @@ -98,6 +98,36 @@ "name": "HostPath mount" } }, + { + "controlID": "C-0057", + "patch": { + "name": "Privileged container" + } + }, + { + "controlID": "C-0066", + "patch": { + "name": "Secret/etcd encryption enabled" + } + }, + { + "controlID": "C-0069", + "patch": { + "name": "Disable anonymous access to Kubelet service" + } + }, + { + "controlID": "C-0070", + "patch": { + "name": "Enforce Kubelet client TLS authentication" + } + }, + { + "controlID": "C-0074", + "patch": { + "name": "Container runtime socket mounted" + } + }, { "controlID": "C-0211", "patch": { @@ -105,28 +135,64 @@ } }, { - "controlID": "C-0262", + "controlID": "C-0255", "patch": { - "name": "Anonymous access enabled" + "name": "Workload with secret access" } }, { - "controlID": "C-0265", + "controlID": "C-0256", "patch": { - "name": "Authenticated user has sensitive permissions" + "name": "Exposure to Internet" } }, { - "controlID": "C-0057", + "controlID": "C-0257", "patch": { - "name": "Privileged container" + "name": "Workload with PVC access" } }, { - "controlID": "C-0038", + "controlID": "C-0258", "patch": { - "name": "Host PID/IPC privileges" + "name": "Workload with configMap access" + } + }, + { + "controlID": "C-0259", + "patch": { + "name": "Workload with credential access" + } + }, + { + "controlID": "C-0260", + "patch": { + "name": "Missing network policy" + } + }, + { + "controlID": "C-0261", + "patch": { + "name": "ServiceAccount token mounted" + } + }, + { + "controlID": "C-0262", + "patch": { + "name": "Anonymous access enabled" + } + }, + { + "controlID": "C-0264", + "patch": { + "name": "PersistentVolume without encyption" + } + }, + { + "controlID": "C-0265", + "patch": { + "name": "Authenticated user has sensitive permissions" } } ] -} +} \ No newline at end of file From 6ce87c58f8f64afb8fcf116353831de20439f098 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 13 Feb 2024 11:11:27 +0200 Subject: [PATCH 097/195] remove C-0264 Signed-off-by: YiscahLevySilas1 --- frameworks/security.json | 6 ------ 1 file changed, 6 
deletions(-) diff --git a/frameworks/security.json b/frameworks/security.json index be65ebd70..e5d2415c4 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -182,12 +182,6 @@ "name": "Anonymous access enabled" } }, - { - "controlID": "C-0264", - "patch": { - "name": "PersistentVolume without encyption" - } - }, { "controlID": "C-0265", "patch": { From bc7b6265e0ce5062bdd9c431aa10eccfac489041 Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Thu, 15 Feb 2024 09:51:45 +0200 Subject: [PATCH 098/195] Adding default exceptions (#576) * Adding default exceptions Signed-off-by: David Wertenteil * Exclude based on labels Signed-off-by: David Wertenteil * clean Signed-off-by: David Wertenteil --------- Signed-off-by: David Wertenteil --- exceptions/kubescape.json | 126 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/exceptions/kubescape.json b/exceptions/kubescape.json index 403314000..d74eda411 100644 --- a/exceptions/kubescape.json +++ b/exceptions/kubescape.json @@ -1,4 +1,55 @@ [ + { + "name": "kubescape-ignore", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "true" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "True" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "yes" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "1" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "enable" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "enabled" + } + } + ], + "posturePolicies": [ + {} + ] + }, { "name": "exclude-kubescape-deployment-security-context", "policyType": "postureExceptionPolicy", @@ -75,20 +126,47 @@ } ], "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, { "controlID": "c-0055" }, + { + "controlID": "c-0056" + }, { "controlID": "c-0017" }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, { "controlID": "c-0210" }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, { "controlID": "c-0211" }, { "controlID": "c-0058" + }, + { + "controlID": "c-0038" } ] }, @@ -206,6 +284,9 @@ "posturePolicies": [ { "controlID": "c-0030" + }, + { + "controlID": "c-0013" } ] }, @@ -376,6 +457,14 @@ "namespace": "kubescape" } }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage", + "namespace": "kubescape" + } + }, { "designatorType": "Attributes", "attributes": { @@ -384,6 +473,22 @@ "namespace": "kubescape" } }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "node-agent", + "namespace": "kubescape" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubevuln", + "namespace": "kubescape" + } + }, { "designatorType": "Attributes", "attributes": { @@ -405,8 +510,20 @@ { "controlID": "c-0034" }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0015" + }, { "controlID": "c-0053" + }, + { + "controlID": "c-0186" } ] }, @@ -529,6 +646,12 @@ { "controlID": "c-0055" }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0013" + }, { "controlID": "c-0056" }, @@ -578,6 +701,9 @@ { "controlID": 
"c-0034" }, + { + "controlID": "c-0260" + }, { "controlID": "c-0055" }, From 84627a0baef259379b104419e021a4dab19855fd Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Sun, 18 Feb 2024 12:33:39 +0200 Subject: [PATCH 099/195] adding kube-system exceptions (#579) Signed-off-by: David Wertenteil --- exceptions/gke.json | 126 ++++++++++++++++++++++++++++++++- exceptions/kube-apiserver.json | 2 +- exceptions/kubescape.json | 38 ++++++++++ exceptions/minikube.json | 32 +++++++++ 4 files changed, 196 insertions(+), 2 deletions(-) diff --git a/exceptions/gke.json b/exceptions/gke.json index 4423491c3..39889b9ca 100644 --- a/exceptions/gke.json +++ b/exceptions/gke.json @@ -1006,6 +1006,20 @@ "name": "validation-webhook.snapshot.storage.gke.io" } }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "gmp-operator.gmp-system.monitoring.googleapis.com" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "warden-validating.config.common-webhooks.networking.gke.io" + } + }, { "designatorType": "Attributes", "attributes": { @@ -1103,6 +1117,20 @@ "kind": "Namespace", "name": "kube-system" } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "gmp-public" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "gmp-system" + } } ], "posturePolicies": [ @@ -1142,11 +1170,107 @@ "name": "route-controller", "namespace": "kube-system" } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "superadmin", + "namespace": "kube-system" + } + + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "pkgextract-service", + "namespace": "kube-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "pkgextract-service", + "namespace": "kube-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "default", + "namespace": "gmp-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "collector", + "namespace": "gmp-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "operator", + "namespace": "gmp-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "collector", + "namespace": "gmp-public" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "alertmanager", + "namespace": "gmp-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "collector", + "namespace": "gmp-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "rule-evaluator", + "namespace": "gmp-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gmp-operator", + "namespace": "gmp-system" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "name": "gke-metrics-agent-conf", + "namespace": "kube-system" + } } ], "posturePolicies": [ { - "controlID": "c-0053" } ] } diff --git a/exceptions/kube-apiserver.json b/exceptions/kube-apiserver.json index 1061e861a..44bb4c6d3 100644 --- a/exceptions/kube-apiserver.json +++ b/exceptions/kube-apiserver.json @@ 
-29,7 +29,7 @@ "controlID": "c-0017" }, { - "controlID": "c-0013 " + "controlID": "c-0013" }, { "controlID": "c-0020" diff --git a/exceptions/kubescape.json b/exceptions/kubescape.json index d74eda411..27f89fec6 100644 --- a/exceptions/kubescape.json +++ b/exceptions/kubescape.json @@ -84,6 +84,14 @@ "namespace": "kubescape" } }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "synchronizer", + "namespace": "kubescape" + } + }, { "designatorType": "Attributes", "attributes": { @@ -497,6 +505,14 @@ "namespace": "kubescape" } }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "synchronizer", + "namespace": "kubescape" + } + }, { "designatorType": "Attributes", "attributes": { @@ -720,5 +736,27 @@ "controlID": "c-0076" } ] + }, + { + "name": "exclude-ns", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kubescape" + } + } + ], + "posturePolicies": [ + {} + ] } ] diff --git a/exceptions/minikube.json b/exceptions/minikube.json index d9ef5fbf2..b8654d538 100644 --- a/exceptions/minikube.json +++ b/exceptions/minikube.json @@ -41,6 +41,38 @@ "name": "coredns" } }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "sealed-secrets-controller" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "tpu-device-plugin" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "runsc-metric-server" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-.*" + } + }, { "designatorType": "Attributes", "attributes": { From 59c4730781365b7fcb6d46a307fc8748e13e490d Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 18 Feb 2024 13:01:23 +0200 Subject: [PATCH 100/195] fix fixpath Signed-off-by: YiscahLevySilas1 --- rules/set-sysctls-params/raw.rego | 12 +++-- .../test/cronjob/expected.json | 45 +++++++++------- .../set-sysctls-params/test/pod/expected.json | 44 +++++++++------- .../test/workload/expected.json | 51 +++++++++++-------- 4 files changed, 92 insertions(+), 60 deletions(-) diff --git a/rules/set-sysctls-params/raw.rego b/rules/set-sysctls-params/raw.rego index 1a9bb898c..d29244a8e 100644 --- a/rules/set-sysctls-params/raw.rego +++ b/rules/set-sysctls-params/raw.rego @@ -12,12 +12,14 @@ deny[msga] { not pod.spec.securityContext.sysctls path := "spec.securityContext.sysctls" + fixPaths := [{"path": sprintf("%s.name", [path]), "value": "YOUR_VALUE"}, + {"path": sprintf("%s.value", [path]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("Pod: %v does not set 'securityContext.sysctls'", [pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, "failedPaths": [], - "fixPaths": [{"path": path, "name": "net.ipv4.tcp_syncookie", "value": "1"}], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": [pod] } @@ -37,12 +39,14 @@ deny[msga] { not wl.spec.template.spec.securityContext.sysctls path := "spec.template.spec.securityContext.sysctls" + fixPaths := [{"path": sprintf("%s.name", [path]), "value": "YOUR_VALUE"}, + {"path": sprintf("%s.value", [path]), "value": "YOUR_VALUE"}] msga := { "alertMessage": 
sprintf("Workload: %v does not set 'securityContext.sysctls'", [wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, "failedPaths": [], - "fixPaths": [{"path": path, "name": "net.ipv4.tcp_syncookie", "value": "1"}], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": [wl] } @@ -61,12 +65,14 @@ deny[msga] { not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls path := "spec.jobTemplate.spec.template.spec.securityContext.sysctls" + fixPaths := [{"path": sprintf("%s.name", [path]), "value": "YOUR_VALUE"}, + {"path": sprintf("%s.value", [path]), "value": "YOUR_VALUE"}] msga := { "alertMessage": sprintf("CronJob: %v does not set 'securityContext.sysctls'", [cj.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, "failedPaths": [], - "fixPaths": [{"path": path, "name": "net.ipv4.tcp_syncookie", "value": "1"}], + "fixPaths": fixPaths, "alertObject": { "k8sApiObjects": [cj] } diff --git a/rules/set-sysctls-params/test/cronjob/expected.json b/rules/set-sysctls-params/test/cronjob/expected.json index 615c089c1..8b11c17bd 100644 --- a/rules/set-sysctls-params/test/cronjob/expected.json +++ b/rules/set-sysctls-params/test/cronjob/expected.json @@ -1,21 +1,30 @@ [ - { - "alertMessage": "CronJob: hello does not set 'securityContext.sysctls'", - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "fixPaths": [{"path": "spec.jobTemplate.spec.template.spec.securityContext.sysctls", "name": "net.ipv4.tcp_syncookie", "value": "1"}], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "hello" - } - } - ] + { + "alertMessage": "CronJob: hello does not set 'securityContext.sysctls'", + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.securityContext.sysctls.name", + "value": "YOUR_VALUE" + }, + { + "path": "spec.jobTemplate.spec.template.spec.securityContext.sysctls.value", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } } + ] } -] + } +] \ No newline at end of file diff --git a/rules/set-sysctls-params/test/pod/expected.json b/rules/set-sysctls-params/test/pod/expected.json index a062e2141..0f29c717f 100644 --- a/rules/set-sysctls-params/test/pod/expected.json +++ b/rules/set-sysctls-params/test/pod/expected.json @@ -1,21 +1,29 @@ [ - { - "alertMessage": "Pod: nginx does not set 'securityContext.sysctls'", - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "fixPaths": [{"path": "spec.securityContext.sysctls", "name": "net.ipv4.tcp_syncookie", "value": "1"}], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "nginx" - } - } - ] + { + "alertMessage": "Pod: nginx does not set 'securityContext.sysctls'", + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.securityContext.sysctls.name", + "value": "YOUR_VALUE" + }, + { + "path": "spec.securityContext.sysctls.value", + "value": "YOUR_VALUE"} + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "nginx" + } } + ] } -] + } +] \ No newline at end of file diff --git a/rules/set-sysctls-params/test/workload/expected.json 
b/rules/set-sysctls-params/test/workload/expected.json index d7ac3edd2..03485d1df 100644 --- a/rules/set-sysctls-params/test/workload/expected.json +++ b/rules/set-sysctls-params/test/workload/expected.json @@ -1,24 +1,33 @@ [ - { - "alertMessage": "Workload: my-deployment does not set 'securityContext.sysctls'", - "packagename": "armo_builtins", - "alertScore": 7, - "failedPaths": [], - "fixPaths": [{"path": "spec.template.spec.securityContext.sysctls", "name": "net.ipv4.tcp_syncookie", "value": "1"}], - "ruleStatus": "", - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "name": "my-deployment", - "labels": { - "app": "goproxy" - } - } - } - ] + { + "alertMessage": "Workload: my-deployment does not set 'securityContext.sysctls'", + "packagename": "armo_builtins", + "alertScore": 7, + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.securityContext.sysctls.name", + "value": "YOUR_VALUE" + }, + { + "path": "spec.template.spec.securityContext.sysctls.value", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "my-deployment", + "labels": { + "app": "goproxy" + } + } } + ] } -] + } +] \ No newline at end of file From f5c8dfbd9e232700b48b34966492b5b170d3da95 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 20 Feb 2024 11:33:49 +0200 Subject: [PATCH 101/195] add allowed values and keys lists to C-0012 Signed-off-by: YiscahLevySilas1 --- default-config-inputs.json | 2 ++ rules/rule-credentials-configmap/raw.rego | 22 ++++++++++++ .../rule.metadata.json | 18 ++++++++-- .../test/test-allowed-values-keys/data.json | 31 ++++++++++++++++ .../test-allowed-values-keys/expected.json | 35 +++++++++++++++++++ .../input/configmap.yaml | 20 +++++++++++ rules/rule-credentials-in-env-var/raw.rego | 29 ++++++++++++++- .../rule.metadata.json | 18 ++++++++-- .../test/pod-allowed-values-keys/data.json | 31 ++++++++++++++++ .../pod-allowed-values-keys/expected.json | 31 ++++++++++++++++ .../pod-allowed-values-keys/input/pod.yaml | 26 ++++++++++++++ 11 files changed, 256 insertions(+), 7 deletions(-) create mode 100644 rules/rule-credentials-configmap/test/test-allowed-values-keys/data.json create mode 100644 rules/rule-credentials-configmap/test/test-allowed-values-keys/expected.json create mode 100644 rules/rule-credentials-configmap/test/test-allowed-values-keys/input/configmap.yaml create mode 100644 rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/data.json create mode 100644 rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/expected.json create mode 100644 rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/input/pod.yaml diff --git a/default-config-inputs.json b/default-config-inputs.json index 043cee3e0..699fa3127 100644 --- a/default-config-inputs.json +++ b/default-config-inputs.json @@ -74,6 +74,8 @@ "_key_", "_secret_" ], + "sensitiveKeyNamesAllowed": [], + "sensitiveValuesAllowed": [], "servicesNames": [ "nifi-service", "argo-server", diff --git a/rules/rule-credentials-configmap/raw.rego b/rules/rule-credentials-configmap/raw.rego index 8507cfb6b..4b0398e48 100644 --- a/rules/rule-credentials-configmap/raw.rego +++ b/rules/rule-credentials-configmap/raw.rego @@ -12,6 +12,10 @@ deny[msga] { contains(lower(map_key), lower(key_name)) + # check that value or key weren't allowed by user + not is_allowed_value(map_secret) + not is_allowed_key_name(map_key) + 
path := sprintf("data[%v]", [map_key]) msga := { @@ -40,6 +44,10 @@ deny[msga] { regex.match(value , map_secret) + # check that value or key weren't allowed by user + not is_allowed_value(map_secret) + not is_allowed_key_name(map_key) + path := sprintf("data[%v]", [map_key]) msga := { @@ -70,6 +78,10 @@ deny[msga] { regex.match(value , decoded_secret) + # check that value or key weren't allowed by user + not is_allowed_value(map_secret) + not is_allowed_key_name(map_key) + path := sprintf("data[%v]", [map_key]) msga := { @@ -84,3 +96,13 @@ deny[msga] { } } } + +is_allowed_value(value) { + allow_val := data.postureControlInputs.sensitiveValuesAllowed[_] + regex.match(allow_val , value) +} + +is_allowed_key_name(key_name) { + allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_] + contains(lower(key_name), lower(allow_key)) +} \ No newline at end of file diff --git a/rules/rule-credentials-configmap/rule.metadata.json b/rules/rule-credentials-configmap/rule.metadata.json index cc08d8224..d56880a85 100644 --- a/rules/rule-credentials-configmap/rule.metadata.json +++ b/rules/rule-credentials-configmap/rule.metadata.json @@ -20,18 +20,30 @@ "ruleDependencies": [], "configInputs": [ "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames" + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" ], "controlConfigInputs": [ { "path": "settings.postureControlInputs.sensitiveValues", - "name": "Values", + "name": "Sensitive Values", "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, { "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", + "name": "Sensitive Keys", "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
} ], "description": "fails if ConfigMaps have sensitive information in configuration", diff --git a/rules/rule-credentials-configmap/test/test-allowed-values-keys/data.json b/rules/rule-credentials-configmap/test/test-allowed-values-keys/data.json new file mode 100644 index 000000000..31d85a1d4 --- /dev/null +++ b/rules/rule-credentials-configmap/test/test-allowed-values-keys/data.json @@ -0,0 +1,31 @@ +{ + "postureControlInputs": { + "sensitiveKeyNames": [ + "aws_access_key_id", + "aws_secret_access_key", + "azure_batchai_storage_account", + "azure_batchai_storage_key", + "azure_batch_account", + "azure_batch_key", + "secret", + "key", + "password", + "pwd", + "token", + "jwt", + "bearer", + "credential" + ], + "sensitiveValues": [ + "BEGIN \\w+ PRIVATE KEY", + "PRIVATE KEY", + "eyJhbGciO", + "JWT", + "Bearer", + "_key_", + "_secret_" + ], + "sensitiveKeyNamesAllowed": ["_FILE"], + "sensitiveValuesAllowed": ["my/secret/file/path"] + } +} \ No newline at end of file diff --git a/rules/rule-credentials-configmap/test/test-allowed-values-keys/expected.json b/rules/rule-credentials-configmap/test/test-allowed-values-keys/expected.json new file mode 100644 index 000000000..3726c87e1 --- /dev/null +++ b/rules/rule-credentials-configmap/test/test-allowed-values-keys/expected.json @@ -0,0 +1,35 @@ +[{ + "alertMessage": "this configmap has sensitive information: game-demo", + "deletePaths": ["data[aws_access_key_id]"], + "failedPaths": ["data[aws_access_key_id]"], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "game-demo" + } + }] + } +}, { + "alertMessage": "this configmap has sensitive information: game-demo", + "deletePaths": ["data[pwd]"], + "failedPaths": ["data[pwd]"], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "game-demo" + } + }] + } +}] \ No newline at end of file diff --git a/rules/rule-credentials-configmap/test/test-allowed-values-keys/input/configmap.yaml b/rules/rule-credentials-configmap/test/test-allowed-values-keys/input/configmap.yaml new file mode 100644 index 000000000..e5caa806e --- /dev/null +++ b/rules/rule-credentials-configmap/test/test-allowed-values-keys/input/configmap.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: game-demo +data: + # property-like keys; each key maps to a simple value + player_initial_lives: "3" + ui_properties_file_name: "user-interface.properties" + aws_access_key_id: "XXXX" + pwd: "hi" + aws_access_key_id_file: "/etc/secret-volume/aws" + aws_secret: "my/secret/file/path" + # file-like keys + game.properties: | + enemy.types=aliens,monsters + player.maximum-lives=5 + user-interface.properties: | + color.good=purple + color.bad=yellow + allow.textmode=true \ No newline at end of file diff --git a/rules/rule-credentials-in-env-var/raw.rego b/rules/rule-credentials-in-env-var/raw.rego index 88c6407fa..c9aadc4a1 100644 --- a/rules/rule-credentials-in-env-var/raw.rego +++ b/rules/rule-credentials-in-env-var/raw.rego @@ -11,6 +11,9 @@ contains(lower(env.name), lower(key_name)) env.value != "" + # check that value or key weren't allowed by user + not is_allowed_value(env.value) + not is_allowed_key_name(env.name) is_not_reference(env) @@ -43,6 +46,9 @@ contains(lower(env.name), lower(key_name)) env.value != "" + # check 
that value or key weren't allowed by user + not is_allowed_value(env.value) + not is_allowed_key_name(env.name) is_not_reference(env) @@ -72,8 +78,10 @@ env := container.env[j] contains(lower(env.name), lower(key_name)) - env.value != "" + # check that value or key weren't allowed by user + not is_allowed_value(env.value) + not is_allowed_key_name(env.name) is_not_reference(env) @@ -104,6 +112,9 @@ deny[msga] { env := container.env[j] contains(lower(env.value), lower(value)) + # check that value or key weren't allowed by user + not is_allowed_value(env.value) + not is_allowed_key_name(env.name) is_not_reference(env) @@ -135,6 +146,9 @@ deny[msga] { env := container.env[j] contains(lower(env.value), lower(value)) + # check that value or key weren't allowed by user + not is_allowed_value(env.value) + not is_allowed_key_name(env.name) is_not_reference(env) @@ -164,6 +178,9 @@ deny[msga] { env := container.env[j] contains(lower(env.value), lower(value)) + # check that value or key weren't allowed by user + not is_allowed_value(env.value) + not is_allowed_key_name(env.name) is_not_reference(env) @@ -189,3 +206,13 @@ is_not_reference(env) not env.valueFrom.secretKeyRef not env.valueFrom.configMapKeyRef } + +is_allowed_value(value) { + allow_val := data.postureControlInputs.sensitiveValuesAllowed[_] + regex.match(allow_val , value) +} + +is_allowed_key_name(key_name) { + allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_] + contains(lower(key_name), lower(allow_key)) +} \ No newline at end of file diff --git a/rules/rule-credentials-in-env-var/rule.metadata.json b/rules/rule-credentials-in-env-var/rule.metadata.json index 9859e5c1b..fde425123 100644 --- a/rules/rule-credentials-in-env-var/rule.metadata.json +++ b/rules/rule-credentials-in-env-var/rule.metadata.json @@ -46,18 +46,30 @@ "ruleDependencies": [], "configInputs": [ "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames" + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" ], "controlConfigInputs": [ { "path": "settings.postureControlInputs.sensitiveValues", - "name": "Values", + "name": "Sensitive Values", "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, { "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", + "name": "Sensitive Keys", "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
} ], "description": "fails if Pods have sensitive information in configuration", diff --git a/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/data.json b/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/data.json new file mode 100644 index 000000000..31d85a1d4 --- /dev/null +++ b/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/data.json @@ -0,0 +1,31 @@ +{ + "postureControlInputs": { + "sensitiveKeyNames": [ + "aws_access_key_id", + "aws_secret_access_key", + "azure_batchai_storage_account", + "azure_batchai_storage_key", + "azure_batch_account", + "azure_batch_key", + "secret", + "key", + "password", + "pwd", + "token", + "jwt", + "bearer", + "credential" + ], + "sensitiveValues": [ + "BEGIN \\w+ PRIVATE KEY", + "PRIVATE KEY", + "eyJhbGciO", + "JWT", + "Bearer", + "_key_", + "_secret_" + ], + "sensitiveKeyNamesAllowed": ["_FILE"], + "sensitiveValuesAllowed": ["my/secret/file/path"] + } +} \ No newline at end of file diff --git a/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/expected.json b/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/expected.json new file mode 100644 index 000000000..5e40ddd37 --- /dev/null +++ b/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/expected.json @@ -0,0 +1,31 @@ +[ + { + "alertMessage": "Pod: audit-pod has sensitive information in environment variables", + "deletePaths": [ + "spec.containers[0].env[1].name", + "spec.containers[0].env[1].value" + ], + "failedPaths": [ + "spec.containers[0].env[1].name", + "spec.containers[0].env[1].value" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "app": "audit-pod" + }, + "name": "audit-pod" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/input/pod.yaml b/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/input/pod.yaml new file mode 100644 index 000000000..252eeeee6 --- /dev/null +++ b/rules/rule-credentials-in-env-var/test/pod-allowed-values-keys/input/pod.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: audit-pod + labels: + app: audit-pod +spec: + containers: + - name: test-container + env : + - name : random + value : "Hello from the environment" + - name: some-name + value: my_key_value + image: hashicorp/http-echo:0.2.3 + securityContext: + allowPrivilegeEscalation: true + - name : test-container2 + env : + - name : random + value : "Hello from the environment" + - name: AWS_TOKEN_FILE + value: /etc/secret-volume/aws + - name: my_password + value: my/secret/file/path + image : hashicorp/http-echo:0.2.3 \ No newline at end of file From bab1d7b6f5a7ecb7ec3d53a773fea77a7915f07c Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Thu, 22 Feb 2024 14:54:49 +0200 Subject: [PATCH 102/195] remove default exceptions Signed-off-by: David Wertenteil --- exceptions/kubescape.json | 51 --------------------------------------- 1 file changed, 51 deletions(-) diff --git a/exceptions/kubescape.json b/exceptions/kubescape.json index 27f89fec6..824efe6d9 100644 --- a/exceptions/kubescape.json +++ b/exceptions/kubescape.json @@ -1,55 +1,4 @@ [ - { - "name": "kubescape-ignore", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { 
- "kubescape.io/ignore": "true" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "True" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "yes" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "1" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "enable" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "enabled" - } - } - ], - "posturePolicies": [ - {} - ] - }, { "name": "exclude-kubescape-deployment-security-context", "policyType": "postureExceptionPolicy", From 38394fad71c9d62b45eb32f4746eb3be72f24fbf Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Thu, 22 Feb 2024 16:31:39 +0200 Subject: [PATCH 103/195] Adding "create release without system tests" gh action Signed-off-by: David Wertenteil --- .../create-releas-without-tests.yaml | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 .github/workflows/create-releas-without-tests.yaml diff --git a/.github/workflows/create-releas-without-tests.yaml b/.github/workflows/create-releas-without-tests.yaml new file mode 100644 index 000000000..886487d91 --- /dev/null +++ b/.github/workflows/create-releas-without-tests.yaml @@ -0,0 +1,124 @@ +name: create release without system tests +on: + workflow_dispatch: + inputs: + TAG: + description: 'Tag name' + required: true + type: string + +env: + REGO_ARTIFACT_KEY_NAME: rego_artifact + REGO_ARTIFACT_PATH: release + +jobs: + # build regolibrary artifacts / test rego dependencies / test rego unit-tests + build-and-rego-test: + name: Build and test rego artifacts + runs-on: ubuntu-latest + outputs: + NEW_TAG: ${{ steps.tag-calculator.outputs.NEW_TAG }} + REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} + REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} + steps: + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f + name: checkout repo content + with: + token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + + - id: tag-calculator + uses: kubescape/workflows/.github/actions/tag-action@main + with: + ORIGINAL_TAG: ${{ inputs.TAG }} + SUB_STRING: "-rc" + + # Test using Golang OPA hot rule compilation + - name: Set up Go + uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 + with: + go-version: '1.20' + + - name: setup python + uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa + with: + python-version: 3.10.6 + + # generating subsections ids + - name: Update frameworks subsections + run: python ./scripts/generate_subsections_ids.py + + # validate control-ID duplications + - run: python ./scripts/validations.py + + # run export script to generate regolibrary artifacts + - run: python ./scripts/export.py + + # removing release artifacts file extensions + - name: Strip Metadata Files Extensions + run: | + cd release + find -type f -name '*.json' | while read f; do mv "$f" "${f%.json}"; done + find -type f -name '*.csv' | while read f; do mv "$f" "${f%.csv}"; done + + - run: ls -laR + + - name: Set outputs + id: set_outputs + run: | + echo "REGO_ARTIFACT_KEY_NAME=${{ env.REGO_ARTIFACT_KEY_NAME }}" >> $GITHUB_OUTPUT + echo "REGO_ARTIFACT_PATH=${{ env.REGO_ARTIFACT_PATH }}" >> $GITHUB_OUTPUT + + - uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # ratchet:actions/upload-artifact@v3.1.1 + name: Upload artifact + with: + name: ${{ env.REGO_ARTIFACT_KEY_NAME }} + path: ${{ 
env.REGO_ARTIFACT_PATH }}/ + if-no-files-found: error + + # start release process + release: + if: ${{ (always() && (contains(needs.*.result, 'success')) && !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }} + name: create release and upload assets + needs: [build-and-rego-test] + runs-on: ubuntu-latest + steps: + - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2 + id: download-artifact + with: + name: ${{ env.REGO_ARTIFACT_KEY_NAME }} + path: ${{ env.REGO_ARTIFACT_PATH }} + + - name: Create Release and upload assets + id: create_release_upload_assets + uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 + with: + token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + name: Release ${{ needs.build-and-rego-test.outputs.NEW_TAG }} + tag_name: ${{ needs.build-and-rego-test.outputs.NEW_TAG }} + draft: false + fail_on_unmatched_files: true + prerelease: false + files: '${{ env.REGO_ARTIFACT_PATH }}/*' + + # Update regolibrary documentation with latest controls and rules. + update-documentation: + needs: [release] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # ratchet:actions/checkout@v3.5.2 + name: checkout repo content + - name: setup python + uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # ratchet:actions/setup-python@v4.6.0 + with: + python-version: 3.8 + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install requests + - name: execute upload script + env: + README_API_KEY: ${{ secrets.README_API_KEY }} + run: |- + python ./scripts/upload-readme.py + - name: execute docs generator script + run: python ./scripts/mk-generator.py # Script to generate controls library documentation From 1f4b04e1c760ed3813fdf671fe4b075670650371 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 22 Feb 2024 17:30:47 +0200 Subject: [PATCH 104/195] clarify remediation Signed-off-by: YiscahLevySilas1 --- controls/C-0013-nonrootcontainers.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controls/C-0013-nonrootcontainers.json b/controls/C-0013-nonrootcontainers.json index ecf9a146f..e62fd8a29 100644 --- a/controls/C-0013-nonrootcontainers.json +++ b/controls/C-0013-nonrootcontainers.json @@ -7,12 +7,12 @@ ] }, "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext or container securityContext and use user ID 1000 or higher, or make sure that runAsNonRoot is true.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", "rulesNames": [ "non-root-containers" ], "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. 
Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser and runAsGroup are set to a user id greater than 0, or that runAsNonRoot is set to true. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", "controlID": "C-0013", "baseScore": 6.0, "example": "@controls/examples/c013.yaml", From 7c015c8600f4a0cef44823c4bae309915695bc34 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 25 Feb 2024 11:05:38 +0200 Subject: [PATCH 105/195] rename C-0007 to `Roles with delete capabilities` Signed-off-by: YiscahLevySilas1 --- controls/C-0007-datadestruction.json | 2 +- frameworks/allcontrols.json | 2 +- frameworks/clusterscan.json | 2 +- frameworks/mitre.json | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/controls/C-0007-datadestruction.json b/controls/C-0007-datadestruction.json index ae5fb367f..3b6ad93d8 100644 --- a/controls/C-0007-datadestruction.json +++ b/controls/C-0007-datadestruction.json @@ -1,5 +1,5 @@ { - "name": "Data Destruction", + "name": "Roles with delete capabilities", "attributes": { "microsoftMitreColumns": [ "Impact" diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index 2ebf4eed0..e730e11fc 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -39,7 +39,7 @@ { "controlID": "C-0007", "patch": { - "name": "Data Destruction" + "name": "Roles with delete capabilities" } }, { diff --git a/frameworks/clusterscan.json b/frameworks/clusterscan.json index 2a4501b1b..65238e9c3 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -59,7 +59,7 @@ { "controlID": "C-0007", "patch": { - "name": "Data Destruction" + "name": "Roles with delete capabilities" } }, { diff --git a/frameworks/mitre.json b/frameworks/mitre.json index 76ff2c248..f043c7162 100644 --- a/frameworks/mitre.json +++ b/frameworks/mitre.json @@ -21,7 +21,7 @@ { "controlID": "C-0007", "patch": { - "name": "Data Destruction" + "name": "Roles with delete capabilities" } }, { From 46801a8d9e64b23f540f5d6b8c9f426055af543c Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 25 Feb 2024 11:09:27 +0200 Subject: [PATCH 106/195] rename C-0002 to `Prevent containers from allowing command execution` Signed-off-by: YiscahLevySilas1 --- controls/C-0002-execintocontainer.json | 2 +- frameworks/allcontrols.json | 2 +- frameworks/armobest.json | 2 +- frameworks/clusterscan.json | 2 +- frameworks/mitre.json | 2 +- frameworks/nsaframework.json | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/controls/C-0002-execintocontainer.json b/controls/C-0002-execintocontainer.json index 39c9b93f3..c77af143e 100644 --- a/controls/C-0002-execintocontainer.json +++ b/controls/C-0002-execintocontainer.json @@ -1,5 +1,5 @@ { - "name": "Exec into container", + "name": "Prevent containers from allowing command execution", "attributes": { 
"microsoftMitreColumns": [ "Execution" diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index 2ebf4eed0..d8d758131 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -21,7 +21,7 @@ { "controlID": "C-0002", "patch": { - "name": "Exec into container" + "name": "Prevent containers from allowing command execution" } }, { diff --git a/frameworks/armobest.json b/frameworks/armobest.json index bb2196030..7d7dcc6db 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -21,7 +21,7 @@ { "controlID": "C-0002", "patch": { - "name": "Exec into container" + "name": "Prevent containers from allowing command execution" } }, { diff --git a/frameworks/clusterscan.json b/frameworks/clusterscan.json index 2a4501b1b..af515d0ec 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -53,7 +53,7 @@ { "controlID": "C-0002", "patch": { - "name": "Exec into container" + "name": "Prevent containers from allowing command execution" } }, { diff --git a/frameworks/mitre.json b/frameworks/mitre.json index 76ff2c248..cce4bf61c 100644 --- a/frameworks/mitre.json +++ b/frameworks/mitre.json @@ -15,7 +15,7 @@ { "controlID": "C-0002", "patch": { - "name": "Exec into container" + "name": "Prevent containers from allowing command execution" } }, { diff --git a/frameworks/nsaframework.json b/frameworks/nsaframework.json index c5b7d98b3..7b2a33609 100644 --- a/frameworks/nsaframework.json +++ b/frameworks/nsaframework.json @@ -15,7 +15,7 @@ { "controlID": "C-0002", "patch": { - "name": "Exec into container" + "name": "Prevent containers from allowing command execution" } }, { From b9cc82c9f84d5c4696b194e5490921449c87fc8b Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 25 Feb 2024 11:46:38 +0200 Subject: [PATCH 107/195] delete C-0001 Signed-off-by: YiscahLevySilas1 --- .../C-0001-forbiddencontainerregistries.json | 33 ------- ...91-cve202247633kyvernosignaturebypass.json | 2 +- default-config-inputs.json | 2 - exceptions/kubescape-prometheus.json | 6 -- exceptions/kubescape.json | 3 - frameworks/allcontrols.json | 6 -- frameworks/armobest.json | 6 -- .../raw.rego | 55 ----------- .../rule.metadata.json | 67 ------------- .../raw.rego | 98 ------------------- .../rule.metadata.json | 67 ------------- .../test/cronjob/data.json | 7 -- .../test/cronjob/expected.json | 26 ----- .../test/cronjob/input/cronjob.yaml | 24 ----- .../test/pod/data.json | 7 -- .../test/pod/expected.json | 1 - .../test/pod/input/pod.yaml | 13 --- .../test/workloads/data.json | 7 -- .../test/workloads/expected.json | 29 ------ .../test/workloads/input/deployment.yaml | 40 -------- 20 files changed, 1 insertion(+), 498 deletions(-) delete mode 100644 controls/C-0001-forbiddencontainerregistries.json delete mode 100644 rules/rule-identify-blocklisted-image-registries-v1/raw.rego delete mode 100644 rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json delete mode 100644 rules/rule-identify-blocklisted-image-registries/raw.rego delete mode 100644 rules/rule-identify-blocklisted-image-registries/rule.metadata.json delete mode 100644 rules/rule-identify-blocklisted-image-registries/test/cronjob/data.json delete mode 100644 rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json delete mode 100644 rules/rule-identify-blocklisted-image-registries/test/cronjob/input/cronjob.yaml delete mode 100644 rules/rule-identify-blocklisted-image-registries/test/pod/data.json delete mode 100644 
rules/rule-identify-blocklisted-image-registries/test/pod/expected.json delete mode 100644 rules/rule-identify-blocklisted-image-registries/test/pod/input/pod.yaml delete mode 100644 rules/rule-identify-blocklisted-image-registries/test/workloads/data.json delete mode 100644 rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json delete mode 100644 rules/rule-identify-blocklisted-image-registries/test/workloads/input/deployment.yaml diff --git a/controls/C-0001-forbiddencontainerregistries.json b/controls/C-0001-forbiddencontainerregistries.json deleted file mode 100644 index de918c769..000000000 --- a/controls/C-0001-forbiddencontainerregistries.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "Forbidden Container Registries", - "attributes": { - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "actionRequired": "configuration" - }, - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster\u2019s management layer.", - "remediation": "Limit the registries from which you pull container images from", - "rulesNames": [ - "rule-identify-blocklisted-image-registries", - "rule-identify-blocklisted-image-registries-v1" - ], - "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. Building images based on untrusted base images can also lead to similar results.", - "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", - "controlID": "C-0001", - "baseScore": 7.0, - "example": "@controls/examples/c001.yaml", - "category": { - "name" : "Workload" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - } -} \ No newline at end of file diff --git a/controls/C-0091-cve202247633kyvernosignaturebypass.json b/controls/C-0091-cve202247633kyvernosignaturebypass.json index 26d40f3be..85cc74800 100644 --- a/controls/C-0091-cve202247633kyvernosignaturebypass.json +++ b/controls/C-0091-cve202247633kyvernosignaturebypass.json @@ -10,7 +10,7 @@ "rulesNames": [ "CVE-2022-47633" ], - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process was pull image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. 
See C-0001 and C-0078 for limiting the use of trusted repositories.", +    "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno that enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process was to pull the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0078 for limiting the use of trusted repositories.",     "test": "This control test for vulnerable versions of Grafana (between 1.8.3 and 1.8.4)",     "controlID": "C-0091",     "baseScore": 8.0, diff --git a/default-config-inputs.json b/default-config-inputs.json index 699fa3127..779b5c5bd 100644 --- a/default-config-inputs.json +++ b/default-config-inputs.json @@ -37,7 +37,6 @@        "bin/busybox",        "usr/bin/busybox"      ], -    "publicRegistries": [],      "sensitiveInterfaces": [        "nifi",        "argo-server", @@ -85,7 +84,6 @@        "weave-scope-app",        "kubernetes-dashboard"      ], -    "untrustedRegistries": [],      "memory_request_max": [],      "memory_request_min": ["0"],      "memory_limit_max": [], diff --git a/exceptions/kubescape-prometheus.json b/exceptions/kubescape-prometheus.json index fe83aff47..ae712885a 100644 --- a/exceptions/kubescape-prometheus.json +++ b/exceptions/kubescape-prometheus.json @@ -53,9 +53,6 @@          }      ],      "posturePolicies": [ -        { -            "controlID": "c-0001" -        },          {              "controlID": "c-0078"          } @@ -140,9 +137,6 @@          }      ],      "posturePolicies": [ -        { -            "controlID": "c-0001" -        },          {              "controlID": "c-0078"          } diff --git a/exceptions/kubescape.json b/exceptions/kubescape.json index 824efe6d9..34b2187ed 100644 --- a/exceptions/kubescape.json +++ b/exceptions/kubescape.json @@ -179,9 +179,6 @@          }      ],      "posturePolicies": [ -        { -            "controlID": "c-0001" -        },          {              "controlID": "c-0078"          } diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index 2ebf4eed0..bdee54df9 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -12,12 +12,6 @@      },      "typeTags": ["compliance"],      "activeControls": [ -    { -        "controlID": "C-0001", -        "patch": { -            "name": "Forbidden Container Registries" -        } -    },      {          "controlID": "C-0002",          "patch": { diff --git a/frameworks/armobest.json b/frameworks/armobest.json index bb2196030..aa5c1bd55 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -12,12 +12,6 @@      },      "typeTags": ["compliance"],      "activeControls": [ -    { -        "controlID": "C-0001", -        "patch": { -            "name": "Forbidden Container Registries" -        } -    },      {          "controlID": "C-0002",          "patch": { diff --git a/rules/rule-identify-blocklisted-image-registries-v1/raw.rego b/rules/rule-identify-blocklisted-image-registries-v1/raw.rego deleted file mode 100644 index b6d018d2f..000000000 --- a/rules/rule-identify-blocklisted-image-registries-v1/raw.rego +++ /dev/null @@ -1,55 +0,0 @@ -package armo_builtins - -untrustedImageRepo[msga] { - wl := input[_] - containers_path := get_containers_path(wl) - containers := object.get(wl, containers_path, []) - container := containers[i] - name := image.parse_normalized_name(container.image) - untrusted_or_public_registries(name) - path := sprintf("%s[%d].image", [concat(".",
containers_path), i]) - - msga := { - "alertMessage": sprintf("image '%v' in container '%s' comes from untrusted registry", [name, container.name]), - "packagename": "armo_builtins", - "alertScore": 2, - "fixPaths": [], - "reviewPaths": [path], - "failedPaths": [path], - "alertObject": {"k8sApiObjects": [wl]}, - } -} - -untrusted_or_public_registries(image){ - # see default-config-inputs.json for list values - untrusted_registries := data.postureControlInputs.untrustedRegistries - registry := untrusted_registries[_] - startswith(image, registry) - -} - -untrusted_or_public_registries(image){ - # see default-config-inputs.json for list values - public_registries := data.postureControlInputs.publicRegistries - registry := public_registries[_] - startswith(image, registry) -} - -# get_containers_path - get resource containers paths for {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} -get_containers_path(resource) := result { - resource_kinds := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} - resource_kinds[resource.kind] - result = ["spec", "template", "spec", "containers"] -} - -# get_containers_path - get resource containers paths for "Pod" -get_containers_path(resource) := result { - resource.kind == "Pod" - result = ["spec", "containers"] -} - -# get_containers_path - get resource containers paths for "CronJob" -get_containers_path(resource) := result { - resource.kind == "CronJob" - result = ["spec", "jobTemplate", "spec", "template", "spec", "containers"] -} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json b/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json deleted file mode 100644 index 5a3221ad4..000000000 --- a/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "name": "rule-identify-blocklisted-image-registries-v1", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "useFromKubescapeVersion": "v2.9.0" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.publicRegistries", - "settings.postureControlInputs.untrustedRegistries" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.publicRegistries", - "name": "Public registries", - "description": "Kubescape checks none of these public container registries are in use." - }, - { - "path": "settings.postureControlInputs.untrustedRegistries", - "name": "Registries block list", - "description": "Kubescape checks none of these user-provided container registries are in use." 
- } - ], - "description": "Identifying if pod container images are from unallowed registries", - "remediation": "Use images from safe registry", - "ruleQuery": "" -} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/raw.rego b/rules/rule-identify-blocklisted-image-registries/raw.rego deleted file mode 100644 index add46113a..000000000 --- a/rules/rule-identify-blocklisted-image-registries/raw.rego +++ /dev/null @@ -1,98 +0,0 @@ -package armo_builtins - -# Check for images from blocklisted repos - -untrustedImageRepo[msga] { - pod := input[_] - k := pod.kind - k == "Pod" - container := pod.spec.containers[i] - path := sprintf("spec.containers[%v].image", [format_int(i, 10)]) - image := container.image - untrusted_or_public_registries(image) - - msga := { - "alertMessage": sprintf("image '%v' in container '%s' comes from untrusted registry", [image, container.name]), - "packagename": "armo_builtins", - "alertScore": 2, - "fixPaths": [], - "reviewPaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [pod] - } - } -} - -untrustedImageRepo[msga] { - wl := input[_] - spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} - spec_template_spec_patterns[wl.kind] - container := wl.spec.template.spec.containers[i] - path := sprintf("spec.template.spec.containers[%v].image", [format_int(i, 10)]) - image := container.image - untrusted_or_public_registries(image) - - msga := { - "alertMessage": sprintf("image '%v' in container '%s' comes from untrusted registry", [image, container.name]), - "packagename": "armo_builtins", - "alertScore": 2, - "fixPaths": [], - "reviewPaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [wl] - } - } -} - -untrustedImageRepo[msga] { - wl := input[_] - wl.kind == "CronJob" - container := wl.spec.jobTemplate.spec.template.spec.containers[i] - path := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].image", [format_int(i, 10)]) - image := container.image - untrusted_or_public_registries(image) - - msga := { - "alertMessage": sprintf("image '%v' in container '%s' comes from untrusted registry", [image, container.name]), - "packagename": "armo_builtins", - "alertScore": 2, - "fixPaths": [], - "reviewPaths": [path], - "failedPaths": [path], - "alertObject": { - "k8sApiObjects": [wl] - } - } -} - -untrusted_or_public_registries(image){ - # see default-config-inputs.json for list values - untrusted_registries := data.postureControlInputs.untrustedRegistries - registry := untrusted_registries[_] - regex.match(regexify(registry), docker_host_wrapper(image)) -} - -untrusted_or_public_registries(image){ - # see default-config-inputs.json for list values - public_registries := data.postureControlInputs.publicRegistries - registry := public_registries[_] - regex.match(regexify(registry), docker_host_wrapper(image)) -} - - -# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'. -# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub. -docker_host_wrapper(image) = result { - not contains(image, "/") - result := sprintf("docker.io/%s", [image]) -} else := image - - - -# regexify - returns a registry regex to be searched only for the image host. 
-regexify(registry) := result { - endswith(registry, "/") - result = sprintf("^%s.*$", [registry]) -} else := sprintf("^%s\/.*$", [registry]) diff --git a/rules/rule-identify-blocklisted-image-registries/rule.metadata.json b/rules/rule-identify-blocklisted-image-registries/rule.metadata.json deleted file mode 100644 index 7537a8bc1..000000000 --- a/rules/rule-identify-blocklisted-image-registries/rule.metadata.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "name": "rule-identify-blocklisted-image-registries", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "useUntilKubescapeVersion": "v2.3.8" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.publicRegistries", - "settings.postureControlInputs.untrustedRegistries" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.publicRegistries", - "name": "Public registries", - "description": "Kubescape checks none of these public container registries are in use." - }, - { - "path": "settings.postureControlInputs.untrustedRegistries", - "name": "Registries block list", - "description": "Kubescape checks none of these user-provided container registries are in use." - } - ], - "description": "Identifying if pod container images are from unallowed registries", - "remediation": "Use images from safe registry", - "ruleQuery": "" -} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/cronjob/data.json b/rules/rule-identify-blocklisted-image-registries/test/cronjob/data.json deleted file mode 100644 index 2878e4e40..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/cronjob/data.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "postureControlInputs": { - "untrustedRegistries": [ - "quay.io" - ] - } -} diff --git a/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json b/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json deleted file mode 100644 index d8b342287..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json +++ /dev/null @@ -1,26 +0,0 @@ -[ - { - "alertMessage": "image 'quay.io/hi:latest' in container 'hello' comes from untrusted registry", - "reviewPaths": [ - "spec.jobTemplate.spec.template.spec.containers[0].image" - ], - "failedPaths": [ - "spec.jobTemplate.spec.template.spec.containers[0].image" - ], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 2, - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "hello" - } - } - ] - } - } -] \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/cronjob/input/cronjob.yaml b/rules/rule-identify-blocklisted-image-registries/test/cronjob/input/cronjob.yaml deleted file mode 100644 index 16e3f48aa..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/cronjob/input/cronjob.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: hello -spec: - schedule: "*/1 * * * *" - 
jobTemplate: - spec: - template: - spec: - serviceAccountName: kubernetes-dashboard - restartPolicy: OnFailure - containers: - - name: hello - image: quay.io/hi:latest - env : - - - name : pwd - value : "Hpwd" - imagePullPolicy: IfNotPresent - command: - - /bin/sh - - -c - - date; echo Hello from the Kubernetes cluster diff --git a/rules/rule-identify-blocklisted-image-registries/test/pod/data.json b/rules/rule-identify-blocklisted-image-registries/test/pod/data.json deleted file mode 100644 index 709abae38..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/pod/data.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "postureControlInputs": { - "publicRegistries": [ - "docker.io" - ] - } -} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/pod/expected.json b/rules/rule-identify-blocklisted-image-registries/test/pod/expected.json deleted file mode 100644 index 0637a088a..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/pod/expected.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/pod/input/pod.yaml b/rules/rule-identify-blocklisted-image-registries/test/pod/input/pod.yaml deleted file mode 100644 index 6d777e7aa..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/pod/input/pod.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: audit-pod - labels: - app: audit-pod -spec: - containers: - - name: test-container - env : - - name : azure_batch_key - value : "Hello from the environment" - image: quay.io/http-echo:0.2.3 diff --git a/rules/rule-identify-blocklisted-image-registries/test/workloads/data.json b/rules/rule-identify-blocklisted-image-registries/test/workloads/data.json deleted file mode 100644 index 1ebb4b84d..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/workloads/data.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "postureControlInputs": { - "publicRegistries": [ - "registry.hub.docker.com" - ] - } -} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json b/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json deleted file mode 100644 index 9e2873142..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json +++ /dev/null @@ -1,29 +0,0 @@ -[ - { - "alertMessage": "image 'registry.hub.docker.com/php:7.0-apache' in container 'php' comes from untrusted registry", - "reviewPaths": [ - "spec.template.spec.containers[1].image" - ], - "failedPaths": [ - "spec.template.spec.containers[1].image" - ], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 2, - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "app": "goproxy" - }, - "name": "my-deployment" - } - } - ] - } - } -] \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/workloads/input/deployment.yaml b/rules/rule-identify-blocklisted-image-registries/test/workloads/input/deployment.yaml deleted file mode 100644 index b7889e636..000000000 --- a/rules/rule-identify-blocklisted-image-registries/test/workloads/input/deployment.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-deployment - labels: - app : goproxy -spec: - selector: - matchLabels: - app : goproxy - template: - metadata : - name : 
goproxy - labels : - app : goproxy - spec : - hostNetwork: true - containers : - - - name : mysql - image : mysql - securityContext: - allowPrivilegeEscalation: true - env : - - - name : MYSQL_ROOT_PASSWORD - value : "rootpasswd" - - - name : php - image : registry.hub.docker.com/php:7.0-apache - volumeMounts : - - - mountPath : /var/www/html - name : site-data - subPath : html - volumes : - - - name : site-data - persistentVolumeClaim : - claimName : my-lamp-site-data \ No newline at end of file From 40a89eb6615ae4054c2830c5d45f6cc5a8f47a1a Mon Sep 17 00:00:00 2001 From: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> Date: Mon, 26 Feb 2024 15:30:32 +0200 Subject: [PATCH 108/195] Update push-releasedev-updates.yaml Signed-off-by: Yuval Leibovich <89763818+yuleib@users.noreply.github.com> --- .github/workflows/push-releasedev-updates.yaml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/push-releasedev-updates.yaml b/.github/workflows/push-releasedev-updates.yaml index b722a6dfe..c131d356e 100644 --- a/.github/workflows/push-releasedev-updates.yaml +++ b/.github/workflows/push-releasedev-updates.yaml @@ -4,6 +4,9 @@ on: push: branches: [master] +env: + GH_ACCESS_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + jobs: build: runs-on: ubuntu-latest @@ -12,8 +15,11 @@ jobs: with: ref: ${{ github.head_ref }} fetch-depth: 0 - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + token: ${{ env.GH_ACCESS_TOKEN }} + - run: git config --global url.https://${{ env.GH_ACCESS_TOKEN }}@github.com/armosec/.insteadOf https://github.com/armosec/ + - run: git config --global url.https://${{ env.GH_ACCESS_TOKEN }}@github.com/kubescape/.insteadOf https://github.com/kubescape/ + - name: Run export script run: | OUTPUT=pre-release python ./scripts/export.py @@ -30,6 +36,6 @@ jobs: - name: Push changes uses: ad-m/github-push-action@master with: - github_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + github_token: ${{ env.GH_ACCESS_TOKEN }} repository: kubescape/regolibrary-dev - force: true \ No newline at end of file + force: true From 0cb3c1c383d5b3c9038d2c929cac3241b16a07ac Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Mon, 26 Feb 2024 15:57:40 +0200 Subject: [PATCH 109/195] Removed cosign controls from default workload scan FW Signed-off-by: David Wertenteil --- frameworks/workloadscan.json | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/frameworks/workloadscan.json b/frameworks/workloadscan.json index be2afbeab..ff357a05d 100644 --- a/frameworks/workloadscan.json +++ b/frameworks/workloadscan.json @@ -14,18 +14,6 @@ "name": "Images from allowed registry" } }, - { - "controlID": "C-0236", - "patch": { - "name": "Verify image signature" - } - }, - { - "controlID": "C-0237", - "patch": { - "name": "Check if signature exists" - } - }, { "controlID": "C-0004", "patch": { From 54064a47701a6b5afbd668339d41886b97634f68 Mon Sep 17 00:00:00 2001 From: yonatanamz Date: Wed, 28 Feb 2024 11:20:52 +0200 Subject: [PATCH 110/195] Updated GH_ACCESS_TOKEN in push-releasedev-updates.yaml workflow Signed-off-by: yonatanamz --- .github/workflows/push-releasedev-updates.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/push-releasedev-updates.yaml b/.github/workflows/push-releasedev-updates.yaml index c131d356e..633ed838a 100644 --- a/.github/workflows/push-releasedev-updates.yaml +++ b/.github/workflows/push-releasedev-updates.yaml @@ -5,7 +5,7 @@ on: branches: [master] env: - GH_ACCESS_TOKEN: ${{ 
secrets.GH_PERSONAL_ACCESS_TOKEN }} + GH_ACCESS_TOKEN: ${{ secrets.ARMOSEC_ACCESS_KEY }} jobs: build: @@ -19,7 +19,7 @@ jobs: - run: git config --global url.https://${{ env.GH_ACCESS_TOKEN }}@github.com/armosec/.insteadOf https://github.com/armosec/ - run: git config --global url.https://${{ env.GH_ACCESS_TOKEN }}@github.com/kubescape/.insteadOf https://github.com/kubescape/ - + - name: Run export script run: | OUTPUT=pre-release python ./scripts/export.py From 54a146be0e5c277f3714b40673e3d6ecb22f5731 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 14:14:53 +0200 Subject: [PATCH 111/195] add control Workload with cluster takeover roles Signed-off-by: YiscahLevySilas1 --- ...0267-workloadwithclustertakeoverroles.json | 22 + go.mod | 3 +- go.sum | 3 + .../filter.rego | 32 + .../raw.rego | 138 +++ .../rule.metadata.json | 63 + .../test/fail-wl-creates-pod/expected.json | 59 + .../input/clusterrole.yaml | 8 + .../input/clusterrolebinding.yaml | 15 + .../test/fail-wl-creates-pod/input/file.yaml | 17 + .../test/fail-wl-creates-pod/input/sa.json | 17 + .../test/fail-wl-gets-secrets/expected.json | 59 + .../input/clusterrole.yaml | 8 + .../input/clusterrolebinding.yaml | 15 + .../test/fail-wl-gets-secrets/input/file.yaml | 17 + .../test/fail-wl-gets-secrets/input/sa.json | 17 + .../pass-wl-limited-permissions/expected.json | 1 + .../input/clusterrole.yaml | 8 + .../input/clusterrolebinding.yaml | 15 + .../input/file.yaml | 17 + .../pass-wl-limited-permissions/input/sa.json | 17 + .../pass-wl-not-mount-sa-token/expected.json | 1 + .../input/clusterrole.yaml | 8 + .../input/clusterrolebinding.yaml | 15 + .../input/file.yaml | 17 + .../pass-wl-not-mount-sa-token/input/sa.json | 17 + .../test/pass-wl-rolebinding/expected.json | 1 + .../pass-wl-rolebinding/input/cluterrole.yaml | 8 + .../test/pass-wl-rolebinding/input/file.yaml | 17 + .../input/rolebinding.yaml | 13 + .../test/pass-wl-rolebinding/input/sa.json | 17 + testrunner/go.mod | 229 ++-- testrunner/go.sum | 1079 +++++++---------- 33 files changed, 1244 insertions(+), 729 deletions(-) create mode 100644 controls/C-0267-workloadwithclustertakeoverroles.json create mode 100644 rules/workload-with-cluster-takeover-roles/filter.rego create mode 100644 rules/workload-with-cluster-takeover-roles/raw.rego create mode 100644 rules/workload-with-cluster-takeover-roles/rule.metadata.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/clusterrole.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/clusterrolebinding.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/file.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/sa.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/clusterrole.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/clusterrolebinding.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/file.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/sa.json create mode 100644 
rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/expected.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/clusterrole.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/clusterrolebinding.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/file.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/sa.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/expected.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/clusterrole.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/clusterrolebinding.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/file.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/sa.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/expected.json create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/cluterrole.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/file.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/rolebinding.yaml create mode 100644 rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/sa.json diff --git a/controls/C-0267-workloadwithclustertakeoverroles.json b/controls/C-0267-workloadwithclustertakeoverroles.json new file mode 100644 index 000000000..6db278347 --- /dev/null +++ b/controls/C-0267-workloadwithclustertakeoverroles.json @@ -0,0 +1,22 @@ +{ + "name": "Workload with cluster takeover roles", + "attributes": {}, + "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", + "remediation": "You should apply least privilege principle. Make sure each service account has only the permissions that are absolutely necessary.", + "rulesNames": [ + "workload-with-cluster-takeover-roles" + ], + "long_description": "In Kubernetes, workloads with overly permissive roles pose a significant security risk. When a workload is granted roles that exceed the necessities of its operation, it creates an attack surface for privilege escalation within the cluster. This is especially critical if the roles include permissions for creating, updating, or accessing sensitive resources or secrets. An attacker exploiting such a workload can leverage these excessive privileges to perform unauthorized actions, potentially leading to a full cluster takeover. 
Ensuring that each service account associated with a workload is limited to permissions that are strictly necessary for its function is crucial in mitigating the risk of cluster takeovers.", + "test": "Check if the service account used by a workload has cluster takeover roles.", + "controlID": "C-0267", + "baseScore": 3.0, + "category": { + "name" : "Workload" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} diff --git a/go.mod b/go.mod index 68c59d1c3..c50b0e999 100644 --- a/go.mod +++ b/go.mod @@ -63,10 +63,11 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang/glog v1.2.0 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.4 // indirect github.com/google/uuid v1.3.0 // indirect diff --git a/go.sum b/go.sum index 9d5fc8a1e..c097ccc38 100644 --- a/go.sum +++ b/go.sum @@ -225,6 +225,8 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -275,6 +277,7 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= diff --git a/rules/workload-with-cluster-takeover-roles/filter.rego b/rules/workload-with-cluster-takeover-roles/filter.rego new file mode 100644 index 000000000..a0037a65d --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/filter.rego @@ -0,0 +1,32 @@ +package armo_builtins + +deny[msga] { + wl := input[_] + start_of_path := get_beginning_of_path(wl) + + msga := { + "alertMessage": sprintf("%v: %v in the following namespace: %v mounts service account tokens by default", [wl.kind, wl.metadata.name, wl.metadata.namespace]), + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [wl] + }, + } +} + + +get_beginning_of_path(workload) = start_of_path { + 
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[workload.kind] + start_of_path := ["spec", "template", "spec"] +} + +get_beginning_of_path(workload) = start_of_path { + workload.kind == "Pod" + start_of_path := ["spec"] +} + +get_beginning_of_path(workload) = start_of_path { + workload.kind == "CronJob" + start_of_path := ["spec", "jobTemplate", "spec", "template", "spec"] +} \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/raw.rego b/rules/workload-with-cluster-takeover-roles/raw.rego new file mode 100644 index 000000000..29ccdb5e2 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/raw.rego @@ -0,0 +1,138 @@ +package armo_builtins + +import future.keywords.in + +deny[msga] { + wl := input[_] + start_of_path := get_start_of_path(wl) + wl_spec := object.get(wl, start_of_path, []) + + # get service account wl is using + sa := input[_] + sa.kind == "ServiceAccount" + is_same_sa(wl_spec, sa.metadata, wl.metadata) + + # check service account token is mounted + is_sa_auto_mounted(wl_spec, sa) + + # check if sa has cluster takeover roles + role := input[_] + endswith(role.kind, "Role") + is_takeover_role(role) + + rolebinding := input[_] + endswith(rolebinding.kind, "RoleBinding") + rolebinding.roleRef.name == role.metadata.name + rolebinding.subjects[j].kind == "ServiceAccount" + rolebinding.subjects[j].name == sa.metadata.name + rolebinding.subjects[j].namespace == sa.metadata.namespace + + reviewPath := "roleRef" + deletePath := sprintf("subjects[%d]", [j]) + + msga := { + "alertMessage": sprintf("%v: %v in the following namespace: %v has cluster takeover roles", [wl.kind, wl.metadata.name, wl.metadata.namespace]), + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [wl] + }, + "relatedObjects": [{ + "object": rolebinding, + "reviewPaths": [reviewPath], + "deletePaths": [deletePath], + }] + } +} + + +get_start_of_path(workload) = start_of_path { + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[workload.kind] + start_of_path := ["spec", "template", "spec"] +} + +get_start_of_path(workload) = start_of_path { + workload.kind == "Pod" + start_of_path := ["spec"] +} + +get_start_of_path(workload) = start_of_path { + workload.kind == "CronJob" + start_of_path := ["spec", "jobTemplate", "spec", "template", "spec"] +} + + +is_sa_auto_mounted(wl_spec, sa) { + # automountServiceAccountToken not in pod spec + not wl_spec.automountServiceAccountToken == false + not wl_spec.automountServiceAccountToken == true + + not sa.automountServiceAccountToken == false +} + +is_sa_auto_mounted(wl_spec, sa) { + # automountServiceAccountToken set to true in pod spec + wl_spec.automountServiceAccountToken == true +} + + +is_same_sa(wl_spec, sa_metadata, wl_metadata) { + wl_spec.serviceAccountName == sa_metadata.name + is_same_namespace(sa_metadata , wl_metadata) +} + +is_same_sa(wl_spec, sa_metadata, wl_metadata) { + not wl_spec.serviceAccountName + sa_metadata.name == "default" + is_same_namespace(sa_metadata , wl_metadata) +} + +# is_same_namespace supports cases where ns is not configured in the metadata +# for yaml scans +is_same_namespace(metadata1, metadata2) { + metadata1.namespace == metadata2.namespace +} + +is_same_namespace(metadata1, metadata2) { + not metadata1.namespace + not metadata2.namespace +} + +is_same_namespace(metadata1, metadata2) { + not metadata2.namespace + 
metadata1.namespace == "default" +} + +is_same_namespace(metadata1, metadata2) { + not metadata1.namespace + metadata2.namespace == "default" +} + + +# look for rule allowing create/update workloads +is_takeover_role(role){ + takeover_resources := ["pods", "*"] + takeover_verbs := ["create", "update", "patch", "*"] + takeover_api_groups := ["", "*"] + + takeover_rule := [rule | rule = role.rules[i] ; + rule.resources[a] in takeover_resources ; + rule.verbs[b] in takeover_verbs ; + rule.apiGroups[c] in takeover_api_groups] + count(takeover_rule) > 0 +} + +# look for rule allowing secret access +is_takeover_role(role){ + rule := role.rules[i] + takeover_resources := ["secrets", "*"] + takeover_verbs := ["get", "list", "watch", "*"] + takeover_api_groups := ["", "*"] + + takeover_rule := [rule | rule = role.rules[i] ; + rule.resources[a] in takeover_resources ; + rule.verbs[b] in takeover_verbs ; + rule.apiGroups[c] in takeover_api_groups] + count(takeover_rule) > 0 +} \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/rule.metadata.json b/rules/workload-with-cluster-takeover-roles/rule.metadata.json new file mode 100644 index 000000000..abaccf99c --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/rule.metadata.json @@ -0,0 +1,63 @@ +{ + "name": "workload-with-cluster-takeover-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins" +} \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json new file mode 100644 index 000000000..a8d9266e3 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json @@ -0,0 +1,59 @@ +[ + { + "alertMessage": "Pod: test-pd in the following namespace: default has cluster takeover roles", + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null, + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "test-pd" + } + } + ] + }, + "relatedObjects": [ + { + "object": { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRoleBinding", + "metadata": { + "name": "read-secrets-global" + }, + "roleRef": { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "name": "test" + }, + "subjects": [ + { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Group", + "name": "manager" + }, + { + "kind": "ServiceAccount", + "name": "default", + "namespace": "default" + } + ] + }, + "failedPaths": null, + "reviewPaths": [ + "roleRef" + ], + "deletePaths": [ + "subjects[1]" + ], + "fixPaths": null + } + ] + } +] \ No newline at end of file diff --git 
a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/clusterrole.yaml b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/clusterrole.yaml new file mode 100644 index 000000000..a3c7c656d --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["create"] \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/clusterrolebinding.yaml b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/clusterrolebinding.yaml new file mode 100644 index 000000000..ba2b69958 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: Group + name: manager + apiGroup: rbac.authorization.k8s.io +- kind: ServiceAccount + name: default + namespace: default +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/file.yaml b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/file.yaml new file mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/sa.json b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json new file mode 100644 index 000000000..a93d443cd --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json @@ -0,0 +1,59 @@ +[ + { + "alertMessage": "Pod: test-pd in the following namespace: default has cluster takeover roles", + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null, + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "test-pd" + } + } + ] + }, + "relatedObjects": [ + { + "object": { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRoleBinding", + "metadata": { + "name": 
"read-secrets-global" + }, + "roleRef": { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "name": "test" + }, + "subjects": [ + { + "kind": "ServiceAccount", + "name": "default", + "namespace": "default" + }, + { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Group", + "name": "dev" + } + ] + }, + "failedPaths": null, + "reviewPaths": [ + "roleRef" + ], + "deletePaths": [ + "subjects[0]" + ], + "fixPaths": null + } + ] + } +] \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/clusterrole.yaml b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/clusterrole.yaml new file mode 100644 index 000000000..460d2eedd --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: ["*"] + resources: ["secrets", "users"] + verbs: ["get"] \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/clusterrolebinding.yaml b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/clusterrolebinding.yaml new file mode 100644 index 000000000..e61c4d450 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: ServiceAccount + name: default + namespace: default +- kind: Group + name: dev + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/file.yaml b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/file.yaml new file mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/sa.json b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/expected.json b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/expected.json @@ -0,0 +1 @@ +[] \ No newline at 
end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/clusterrole.yaml b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/clusterrole.yaml new file mode 100644 index 000000000..6ede27070 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: ["*"] + resources: ["*", "secrets", "users"] + verbs: ["get", "*"] \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/clusterrolebinding.yaml b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/clusterrolebinding.yaml new file mode 100644 index 000000000..e1426bc28 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: Group + name: manager + apiGroup: rbac.authorization.k8s.io +- kind: Group + name: dev + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/file.yaml b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/file.yaml new file mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/sa.json b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-limited-permissions/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/expected.json b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/clusterrole.yaml b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/clusterrole.yaml new file mode 100644 index 000000000..6ede27070 --- /dev/null +++ 
b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: ["*"] + resources: ["*", "secrets", "users"] + verbs: ["get", "*"] \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/clusterrolebinding.yaml b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/clusterrolebinding.yaml new file mode 100644 index 000000000..e1426bc28 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: Group + name: manager + apiGroup: rbac.authorization.k8s.io +- kind: Group + name: dev + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/file.yaml b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/file.yaml new file mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/sa.json b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-not-mount-sa-token/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/expected.json b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/cluterrole.yaml b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/cluterrole.yaml new file mode 100644 index 000000000..fd8e287be --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/cluterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: [""] + resources: ["*"] + verbs: ["*"] \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/file.yaml 
b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/file.yaml new file mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/rolebinding.yaml b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/rolebinding.yaml new file mode 100644 index 000000000..4448be426 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/rolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod + namespace: kube-system +subjects: +- kind: User + name: jane + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/sa.json b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-cluster-takeover-roles/test/pass-wl-rolebinding/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} diff --git a/testrunner/go.mod b/testrunner/go.mod index 7fb8d8fef..555c20cbf 100644 --- a/testrunner/go.mod +++ b/testrunner/go.mod @@ -1,133 +1,176 @@ module testrunner -go 1.19 +go 1.21 + +toolchain go1.22.0 require ( - github.com/armosec/armoapi-go v0.0.119 - github.com/golang/glog v1.0.0 - github.com/kubescape/k8s-interface v0.0.89 - github.com/kubescape/opa-utils v0.0.204 - github.com/open-policy-agent/opa v0.45.0 - github.com/stretchr/testify v1.8.0 + github.com/armosec/armoapi-go v0.0.330 + github.com/golang/glog v1.1.2 + github.com/kubescape/k8s-interface v0.0.161 + github.com/kubescape/opa-utils v0.0.277 + github.com/open-policy-agent/opa v0.61.0 + github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v3 v3.0.1 ) -require github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 // indirect +require ( + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect + github.com/armosec/gojay v1.2.15 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0 // indirect + 
github.com/aws/aws-sdk-go-v2/service/iam v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/briandowns/spinner v1.23.0 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/olvrng/ujson v1.1.0 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.3.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/viper v1.7.0 // indirect + github.com/stripe/stripe-go/v74 v74.28.0 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2 // indirect + github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2 // indirect + github.com/uptrace/uptrace-go v1.18.0 // indirect + go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/sdk v1.21.0 // indirect + go.opentelemetry.io/otel/sdk/metric v0.41.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect + golang.org/x/sync v0.5.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect + gopkg.in/ini.v1 v1.51.0 // indirect +) require ( - cloud.google.com/go v0.102.1 // indirect - cloud.google.com/go/compute v1.7.0 // indirect - cloud.google.com/go/container v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.27 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect - 
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect + cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/container v1.27.1 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect - github.com/armosec/utils-go v0.0.12 // indirect - github.com/armosec/utils-k8s-go v0.0.12 // indirect - github.com/aws/aws-sdk-go-v2 v1.16.7 // indirect - github.com/aws/aws-sdk-go-v2/config v1.15.13 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.12.8 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15 // indirect - github.com/aws/aws-sdk-go-v2/service/eks v1.21.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.11 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 // indirect - github.com/aws/smithy-go v1.12.0 // indirect + github.com/armosec/utils-go v0.0.57 // indirect + github.com/armosec/utils-k8s-go v0.0.26 // indirect + github.com/aws/aws-sdk-go-v2 v1.19.1 // indirect + github.com/aws/aws-sdk-go-v2/config v1.18.30 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.29 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.28.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.14 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 // indirect + github.com/aws/smithy-go v1.13.5 // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/docker v20.10.24+incompatible // indirect - github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/docker/docker v25.0.1+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + 
github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.8 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect - github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kr/pretty v0.2.1 // indirect - github.com/kubescape/go-logger v0.0.6 // indirect + github.com/kubescape/go-logger v0.0.22 // indirect github.com/kubescape/rbac-utils v0.0.20 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/pquerna/cachecontrol v0.1.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/pquerna/cachecontrol v0.2.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yannh/kubeconform v0.6.2 - github.com/yashtewari/glob-intersection v0.1.0 // indirect - go.opencensus.io v0.23.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.22.0 // indirect - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - google.golang.org/api v0.85.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73 // indirect - 
google.golang.org/grpc v1.49.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + github.com/yashtewari/glob-intersection v0.2.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/oauth2 v0.14.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/api v0.149.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/grpc v1.61.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/api v0.25.3 // indirect - k8s.io/apimachinery v0.25.3 // indirect - k8s.io/client-go v0.25.3 // indirect - k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect - sigs.k8s.io/controller-runtime v0.12.3 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/api v0.29.2 // indirect + k8s.io/apimachinery v0.29.2 // indirect + k8s.io/client-go v0.29.2 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/controller-runtime v0.15.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) replace github.com/libgit2/git2go/v33 => ./git2go diff --git a/testrunner/go.sum b/testrunner/go.sum index 60d22354e..aead3d753 100644 --- a/testrunner/go.sum +++ b/testrunner/go.sum @@ -7,238 +7,211 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod 
h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/container v1.2.0 h1:LPKlQa4XfBTWdaBSDx/KQ/v45l8FDRzSV0tDpU6e/38= -cloud.google.com/go/container v1.2.0/go.mod h1:Cj2AgMsCUfMVfbGh0Fx7u5Ah/qeC0ajLrqqGGiAdCGw= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/container v1.27.1 h1:ZfLRiFM9ddFE92SlA28rknI6YJMz5Z5huAQK+FKWxIQ= +cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE= -github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= -github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= -github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod 
h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0 h1:qtRcg5Y7jNJ4jEzPq4GpWLfTspHdNe2ZK6LjwGcjgmU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0/go.mod h1:lPneRe3TwsoDRKY4O6YDLXHhEWrD+TIRa8XrV/3/fqw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1 h1:6A4M8smF+y8nM/DYsLNQz9n7n2ZGaEVqfz8ZWQirQkI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1/go.mod h1:WqyxV5S0VtXD2+2d6oPqOvyhGubCvzLCKSAKgQ004Uk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 h1:1u/K2BFv0MwkG6he8RYuUcbbeK22rkoZbg4lKa/msZU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0/go.mod h1:U5gpsREQZE6SLk1t/cFfc1eMhYAlYpEzvaYXuDfefy8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.8 
h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/armosec/armoapi-go v0.0.119 h1:7XbvBbOKp26Bpp72LQ8Spw4FBpbXu3+qZFQyPEwTPFk= -github.com/armosec/armoapi-go v0.0.119/go.mod h1:2zoNzb3Fy9ZByeczJZ47ftDRLRzTykVdTISS3GTc/JU= -github.com/armosec/utils-go v0.0.12 h1:NXkG/BhbSVAmTVXr0qqsK02CmxEiXuJyPmdTRcZ4jAo= -github.com/armosec/utils-go v0.0.12/go.mod h1:F/K1mI/qcj7fNuJl7xktoCeHM83azOF0Zq6eC2WuPyU= -github.com/armosec/utils-k8s-go v0.0.12 h1:u7kHSUp4PpvPP3hEaRXMbM0Vw23IyLhAzzE+2TW6Jkk= -github.com/armosec/utils-k8s-go v0.0.12/go.mod h1:rPHiOaHefWa9ujspwvYYAp0uEbqGGyAMiNrFa/Gpp/8= -github.com/aws/aws-sdk-go-v2 v1.16.7 h1:zfBwXus3u14OszRxGcqCDS4MfMCv10e8SMJ2r8Xm0Ns= -github.com/aws/aws-sdk-go-v2 v1.16.7/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw= -github.com/aws/aws-sdk-go-v2/config v1.15.13 h1:CJH9zn/Enst7lDiGpoguVt0lZr5HcpNVlRJWbJ6qreo= -github.com/aws/aws-sdk-go-v2/config v1.15.13/go.mod h1:AcMu50uhV6wMBUlURnEXhr9b3fX6FLSTlEV89krTEGk= -github.com/aws/aws-sdk-go-v2/credentials v1.12.8 h1:niTa7zc7uyOP2ufri0jPESBt1h9yP3Zc0q+xzih3h8o= -github.com/aws/aws-sdk-go-v2/credentials v1.12.8/go.mod h1:P2Hd4Sy7mXRxPNcQMPBmqszSJoDXexX8XEDaT6lucO0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8 h1:VfBdn2AxwMbFyJN/lF/xuT3SakomJ86PZu3rCxb5K0s= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8/go.mod h1:oL1Q3KuCq1D4NykQnIvtRiBGLUXhcpY5pl6QZB2XEPU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 h1:2C0pYHcUBmdzPj+EKNC4qj97oK6yjrUhc1KoSodglvk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14/go.mod h1:kdjrMwHwrC3+FsKhNcCMJ7tUVj/8uSD5CZXeQ4wV6fM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 h1:2J+jdlBJWEmTyAwC82Ym68xCykIvnSnIN18b8xHGlcc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8/go.mod h1:ZIV8GYoC6WLBW5KGs+o4rsc65/ozd+eQ0L31XF5VDwk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15 h1:QquxR7NH3ULBsKC+NoTpilzbKKS+5AELfNREInbhvas= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15/go.mod h1:Tkrthp/0sNBShQQsamR7j/zY4p19tVTAs+nnqhH6R3c= -github.com/aws/aws-sdk-go-v2/service/eks v1.21.4 h1:qmKWieiIiYwD46GRD6nxFc1KsyR0ChGRid8emb7rDEY= -github.com/aws/aws-sdk-go-v2/service/eks 
v1.21.4/go.mod h1:Th2+t6mwi0bZayXUOFOTuyWR2nwRUVcadDy4WGE8C2E= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 h1:oKnAXxSF2FUvfgw8uzU/v9OTYorJJZ8eBmWhr9TWVVQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8/go.mod h1:rDVhIMAX9N2r8nWxDUlbubvvaFMnfsm+3jAV7q+rpM4= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.11 h1:XOJWXNFXJyapJqQuCIPfftsOf0XZZioM0kK6OPRt9MY= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.11/go.mod h1:MO4qguFjs3wPGcCSpQ7kOFTwRvb+eu+fn+1vKleGHUk= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 h1:yOfILxyjmtr2ubRkRJldlHDFBhf5vw4CzhbwWIBmimQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.9/go.mod h1:O1IvkYxr+39hRf960Us6j0x1P8pDqhTX+oXM5kQNl/Y= -github.com/aws/smithy-go v1.12.0 h1:gXpeZel/jPoWQ7OEmLIgCUnhkFftqNfwWUwAHSlp1v0= -github.com/aws/smithy-go v1.12.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armosec/armoapi-go v0.0.330 h1:kvyLshJ3VBqWxDO/hvlpVU1DNsrmkb5M0oStw+Uwxb8= +github.com/armosec/armoapi-go v0.0.330/go.mod h1:6VYIw1hoNU3dTXKckMHNHhzhhPTMXDHtv5AFxvG4Q+U= +github.com/armosec/gojay v1.2.15 h1:sSB2vnAvacUNkw9nzUYZKcPzhJOyk6/5LK2JCNdmoZY= +github.com/armosec/gojay v1.2.15/go.mod h1:vzVAaay2TWJAngOpxu8aqLbye9jMgoKleuAOK+xsOts= +github.com/armosec/utils-go v0.0.57 h1:0RaqexK+t7HeKWfldBv2C1JiLLGuUx9FP0DGWDNRJpg= +github.com/armosec/utils-go v0.0.57/go.mod h1:4wfINE8JTQ6EHvSL2jki0Q3/D1j6oDi6sxxrtAEug74= +github.com/armosec/utils-k8s-go v0.0.26 h1:gVSV1mrALyphaesc+JXbx9SfbxLqfgg1KvvC1/0Hfkk= +github.com/armosec/utils-k8s-go v0.0.26/go.mod h1:WL2brx3tszxeSl1yHac0oAVJUg3o22HYh1dPjaSfjXU= +github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.19.1 h1:STs0lbbpXu3byTPcnRLghs2DH0yk9qKDo27TyyJSKsM= +github.com/aws/aws-sdk-go-v2 v1.19.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/config v1.18.30 h1:TTAXQIn31qYFUQjkW6siVrRTX1ux+sADZDOe3jsZcMg= +github.com/aws/aws-sdk-go-v2/config v1.18.30/go.mod h1:+YogjT7e/t9JVu/sOnZZgxTge1G+bPNk8zOaI0QIQvE= +github.com/aws/aws-sdk-go-v2/credentials v1.13.29 h1:KNgCpThGuZyCjq9EuuqoLDenKKMwO/x1Xx01ckDa7VI= +github.com/aws/aws-sdk-go-v2/credentials v1.13.29/go.mod h1:VMq1LcmSEa9qxBlOCYTjVuGJWEEzhGmgL552jQsmhss= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6 h1:kortK122LvTU34CGX/F9oJpelXKkEA2j/MW48II+8+8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6/go.mod h1:k7IPHyHNIASI0m0RwOmCjWOTtgG+J0raqwuHH8WhWJE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36 h1:kbk81RlPoC6e4co7cQx2FAvH9TgbzxIqCqiosAFiB+w= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36/go.mod h1:T8Jsn/uNL/AFOXrVYQ1YQaN1r9gN34JU1855/Lyjv+o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30 h1:lMl8S5SB8jNCB+Sty2Em4lnu3IJytceHQd7qbmfqKL0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30/go.mod 
h1:v3GSCnFxbHzt9dlWBqvA1K1f9lmWuf4ztupZBCAIVs4= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37 h1:BXiqvN7WuV/pMhz8CivhO8cG8icJcjnjHumif4ukQ0c= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37/go.mod h1:d4GZ62cjnz/hjKFdAu11gAwK73bdhqaFv2O4J1gaqIs= +github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0 h1:5RVanD+P+L2W9WU07/8J/A52vnQi7F3ClBdWQttgYlg= +github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0/go.mod h1:9yGOFsa2OcdyePojE89xNGtdBusTyc8ocjpiuFtFc0g= +github.com/aws/aws-sdk-go-v2/service/eks v1.28.1 h1:SA+98Rnehl2KXewvGXc2Lw2ns3Y4t9jdMHmEY5hcNws= +github.com/aws/aws-sdk-go-v2/service/eks v1.28.1/go.mod h1:cQRkgJKg6s9AIzFZ+i4pXdm+/3Fw4MuPNqCdMvSaqns= +github.com/aws/aws-sdk-go-v2/service/iam v1.19.0 h1:9vCynoqC+dgxZKrsjvAniyIopsv3RZFsZ6wkQ+yxtj8= +github.com/aws/aws-sdk-go-v2/service/iam v1.19.0/go.mod h1:OyAuvpFeSVNppcSsp1hFOVQcaTRc1LE24YIR7pMbbAA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30 h1:UcVZxLVNY4yayCmiG94Ge3l2qbc5WEB/oa4RmjoQEi0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30/go.mod h1:wPffyJiWWtHwvpFyn23WjAjVjMnlQOQrl02+vutBh3Y= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.14 h1:gUjz7trfz9qBm0AlkKTvJHBXELi1wvw+2LA9GfD2AsM= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.14/go.mod h1:9kfRdJgLCbnyeqZ/DpaSwcgj9ZDYLfRpe8Sze+NrYfQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14 h1:8bEtxV5UT9ucdWGXfZ7CM3caQhSHGjWnTHt0OeF7m7s= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14/go.mod h1:nd9BG2UnexN2sDx/mk2Jd6pf3d2E61AiA8m8Fdvdx8Y= +github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 h1:U7h9CPoyMfVoN5jUglB0LglCMP10AK4vMBsbsCKM8Yw= +github.com/aws/aws-sdk-go-v2/service/sts v1.20.1/go.mod h1:BUHusg4cOA1TFGegj7x8/eoWrbdHzJfoMrXcbMQAG0k= +github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= +github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bytecodealliance/wasmtime-go v1.0.0 h1:9u9gqaUiaJeN5IoD1L7egD8atOnTGyJcNp8BhkL9cUU= +github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= +github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod 
h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= -github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= -github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v25.0.1+incompatible h1:k5TYd5rIVQRSqcTwCID+cyVA0yRg86+Pcrz1ls0/frA= +github.com/docker/docker v25.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897 h1:E52jfcE64UG42SwLmrW0QByONfGynWuzBvm86BoB9z8= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -246,101 +219,96 @@ github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200j github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod 
h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 
v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 
v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -348,51 +316,70 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA= -github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI= -github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU= -github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0= -github.com/kubescape/opa-utils v0.0.204 h1:9O9drjyzjOhI7Xi2S4Px0WKa66U5GFPQqeOLvhDqHnw= -github.com/kubescape/opa-utils v0.0.204/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg= +github.com/kubescape/go-logger v0.0.22 h1:gle7wH6emOiGv9ljdpVi82pWLQ3jGucrUucvil6JXHE= +github.com/kubescape/go-logger v0.0.22/go.mod h1:x3HBpZo3cMT/WIdy18BxvVVd5D0e/PWFVk/HiwBNu3g= +github.com/kubescape/k8s-interface v0.0.161 h1:v6b3/kmA4o/2niNrejrbXj5X9MLfH0UrpI3s+e/fdwc= +github.com/kubescape/k8s-interface v0.0.161/go.mod h1:oF+Yxug3Kpfu9Yr2j63wy7gwswrKXpiqI0mLk/7gF/s= +github.com/kubescape/opa-utils v0.0.277 h1:nlzhvHZE0mAQ6YTtNgod4nI0wKwL9/7yCynobbKn2go= +github.com/kubescape/opa-utils v0.0.277/go.mod h1:N/UnbZHpoiHQH7O50yadhIXZvVl0IVtTGBmePPrSQSg= github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw= github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 
h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -403,43 +390,71 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/neelance/astrewrite 
v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= -github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs= -github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olvrng/ujson v1.1.0 h1:8xVUzVlqwdMVWh5d1UHBtLQ1D50nxoPuPEq9Wozs8oA= +github.com/olvrng/ujson v1.1.0/go.mod h1:Mz4G3RODTUfbkKyvi0lgmPx/7vd3Saksk+1jgk8s9xo= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/open-policy-agent/opa v0.61.0 h1:nhncQ2CAYtQTV/SMBhDDPsCpCQsUW+zO/1j+T5V7oZg= +github.com/open-policy-agent/opa v0.61.0/go.mod h1:7OUuzJnsS9yHf8lw0ApfcbrnaRG1EkN3J2fuuqi4G/E= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= 
-github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= +github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 h1:lEOLY2vyGIqKWUI9nzsOJRV3mb3WC9dXYORsLEUcoeY= github.com/santhosh-tekuri/jsonschema/v5 v5.1.1/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -463,83 +478,134 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1l github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod 
h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stripe/stripe-go/v74 v74.28.0 h1:ItzPPy+cjMKbR3Oihknt/8dv6PANp3hTThUGZjhF9lc= +github.com/stripe/stripe-go/v74 v74.28.0/go.mod h1:f9L6LvaXa35ja7eyvP6GQswoaIPaBRvGAimAO+udbBw= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2 h1:CNznWHkrbA6o1q2H/BsH4tIHf4zbKNtndeoV+AH8z0U= +github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2/go.mod h1:7YSrHCmYPHIXjTWnKSU7EGT0TFEcm3WwSeQquwCGg38= +github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2 h1:uyrW06oJi4iWvhjPLVfk4qrSP2Zm0AMozKKDmp6i4pE= +github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2/go.mod h1:PMAs2dNxP55lgt6xu0if+Jasm6s+Xpmqn6ev1NyDfnI= +github.com/uptrace/uptrace-go v1.18.0 h1:RY15qy19C0irbe2UCxQbjenk8WyUdvUV756R9ZpqCGI= +github.com/uptrace/uptrace-go v1.18.0/go.mod h1:BUW3sFgEyRmZIxts4cv6TGaJnWAW95uW78GIiSdChOQ= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= 
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yannh/kubeconform v0.6.2 h1:xjUxiCcqTBofTsM3UT6fNb/tKRfqjakNfWvHRa3sGOo= github.com/yannh/kubeconform v0.6.2/go.mod h1:4E6oaL+lh7KgCG2SaOabeeAFBkyKu5D9ab0OEekGcbs= -github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg= -github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= 
+go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0 h1:TXu20nL4yYfJlQeqG/D3Ia6b0p2HZmLfJto9hqJTQ/c= +go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0/go.mod h1:tQ5gBnfjndV1su3+DiLuu6rnd9hBBzg4rkRILnjSNFg= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0 h1:k0k7hFNDd8K4iOMJXj7s8sHaC4mhTlAeppRmZXLgZ6k= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0 h1:HgbDTD8pioFdY3NRc/YCvsWjqQPtweGyXxa32LgnTOw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0/go.mod h1:tmvt/yK5Es5d6lHYWerLSOna8lCEfrBVX/a9M0ggqss= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 h1:hSWWvDjXHVLq9DkmB+77fl8v7+t+yYiS+eNkiplDK54= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0/go.mod h1:zG7KQql1WjZCaUJd+L/ReSYx4bjbYJxg5ws9ws+mYes= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk/metric v0.41.0 h1:c3sAt9/pQ5fSIUfl0gPtClV3HhE18DCVzByD33R/zsk= +go.opentelemetry.io/otel/sdk/metric v0.41.0/go.mod h1:PmOmSt+iOklKtIg5O4Vz9H/ttcRFSNTgii+E1KGyn1w= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -550,27 +616,22 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -580,66 +641,21 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net 
v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw= -golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= +golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -647,15 +663,18 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -664,82 +683,37 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -748,6 +722,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -757,51 +732,17 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -810,52 +751,16 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api 
v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0 h1:8rJoHuRxx+vCmZtAO/3k1dRLvYNVyTJtZ5oaFZvhgvc= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -869,79 +774,13 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= 
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
-google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73 h1:sdZWfcGN37Dv0QWIhuasQGMzAQJOL2oqnvot4/kPgfQ= -google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -950,35 +789,10 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= +google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -987,37 +801,37 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 
h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1025,31 +839,26 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh 
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= -k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI= -k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= -k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= -k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= -k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod 
h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= +sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= From 47123551255c798ec7e8a3da69ee1aba7b53f9bd Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 14:20:01 +0200 Subject: [PATCH 112/195] go mod tidy Signed-off-by: YiscahLevySilas1 --- go.mod | 2 +- go.sum | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c50b0e999..d1895abee 100644 --- a/go.mod +++ b/go.mod @@ -63,7 +63,7 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.2.0 + github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect diff --git a/go.sum b/go.sum index c097ccc38..8538c5faa 100644 --- a/go.sum +++ b/go.sum @@ -224,7 +224,6 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -275,8 +274,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= From 
ca99a401b44c3d0778a651a7263e2218dc0f3d8d Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 14:24:49 +0200 Subject: [PATCH 113/195] Revert "delete C-0001" This reverts commit b9cc82c9f84d5c4696b194e5490921449c87fc8b. Signed-off-by: YiscahLevySilas1 --- .../C-0001-forbiddencontainerregistries.json | 33 +++++++ ...91-cve202247633kyvernosignaturebypass.json | 2 +- default-config-inputs.json | 2 + exceptions/kubescape-prometheus.json | 6 ++ exceptions/kubescape.json | 3 + frameworks/allcontrols.json | 6 ++ frameworks/armobest.json | 6 ++ .../raw.rego | 55 +++++++++++ .../rule.metadata.json | 67 +++++++++++++ .../raw.rego | 98 +++++++++++++++++++ .../rule.metadata.json | 67 +++++++++++++ .../test/cronjob/data.json | 7 ++ .../test/cronjob/expected.json | 26 +++++ .../test/cronjob/input/cronjob.yaml | 24 +++++ .../test/pod/data.json | 7 ++ .../test/pod/expected.json | 1 + .../test/pod/input/pod.yaml | 13 +++ .../test/workloads/data.json | 7 ++ .../test/workloads/expected.json | 29 ++++++ .../test/workloads/input/deployment.yaml | 40 ++++++++ 20 files changed, 498 insertions(+), 1 deletion(-) create mode 100644 controls/C-0001-forbiddencontainerregistries.json create mode 100644 rules/rule-identify-blocklisted-image-registries-v1/raw.rego create mode 100644 rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json create mode 100644 rules/rule-identify-blocklisted-image-registries/raw.rego create mode 100644 rules/rule-identify-blocklisted-image-registries/rule.metadata.json create mode 100644 rules/rule-identify-blocklisted-image-registries/test/cronjob/data.json create mode 100644 rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json create mode 100644 rules/rule-identify-blocklisted-image-registries/test/cronjob/input/cronjob.yaml create mode 100644 rules/rule-identify-blocklisted-image-registries/test/pod/data.json create mode 100644 rules/rule-identify-blocklisted-image-registries/test/pod/expected.json create mode 100644 rules/rule-identify-blocklisted-image-registries/test/pod/input/pod.yaml create mode 100644 rules/rule-identify-blocklisted-image-registries/test/workloads/data.json create mode 100644 rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json create mode 100644 rules/rule-identify-blocklisted-image-registries/test/workloads/input/deployment.yaml diff --git a/controls/C-0001-forbiddencontainerregistries.json b/controls/C-0001-forbiddencontainerregistries.json new file mode 100644 index 000000000..de918c769 --- /dev/null +++ b/controls/C-0001-forbiddencontainerregistries.json @@ -0,0 +1,33 @@ +{ + "name": "Forbidden Container Registries", + "attributes": { + "microsoftMitreColumns": [ + "Initial Access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "actionRequired": "configuration" + }, + "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster\u2019s management layer.", + "remediation": "Limit the registries from which you pull container images from", + "rulesNames": [ + "rule-identify-blocklisted-image-registries", + "rule-identify-blocklisted-image-registries-v1" + ], + "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. 
The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. Building images based on untrusted base images can also lead to similar results.", + "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", + "controlID": "C-0001", + "baseScore": 7.0, + "example": "@controls/examples/c001.yaml", + "category": { + "name" : "Workload" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} \ No newline at end of file diff --git a/controls/C-0091-cve202247633kyvernosignaturebypass.json b/controls/C-0091-cve202247633kyvernosignaturebypass.json index 85cc74800..26d40f3be 100644 --- a/controls/C-0091-cve202247633kyvernosignaturebypass.json +++ b/controls/C-0091-cve202247633kyvernosignaturebypass.json @@ -10,7 +10,7 @@ "rulesNames": [ "CVE-2022-47633" ], - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process was pull image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0078 for limiting the use of trusted repositories.", + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process was pull image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. 
See C-0001 and C-0078 for limiting the use of trusted repositories.", "test": "This control test for vulnerable versions of Grafana (between 1.8.3 and 1.8.4)", "controlID": "C-0091", "baseScore": 8.0, diff --git a/default-config-inputs.json b/default-config-inputs.json index 779b5c5bd..699fa3127 100644 --- a/default-config-inputs.json +++ b/default-config-inputs.json @@ -37,6 +37,7 @@ "bin/busybox", "usr/bin/busybox" ], + "publicRegistries": [], "sensitiveInterfaces": [ "nifi", "argo-server", @@ -84,6 +85,7 @@ "weave-scope-app", "kubernetes-dashboard" ], + "untrustedRegistries": [], "memory_request_max": [], "memory_request_min": ["0"], "memory_limit_max": [], diff --git a/exceptions/kubescape-prometheus.json b/exceptions/kubescape-prometheus.json index ae712885a..fe83aff47 100644 --- a/exceptions/kubescape-prometheus.json +++ b/exceptions/kubescape-prometheus.json @@ -53,6 +53,9 @@ } ], "posturePolicies": [ + { + "controlID": "c-0001" + }, { "controlID": "c-0078" } @@ -137,6 +140,9 @@ } ], "posturePolicies": [ + { + "controlID": "c-0001" + }, { "controlID": "c-0078" } diff --git a/exceptions/kubescape.json b/exceptions/kubescape.json index 34b2187ed..824efe6d9 100644 --- a/exceptions/kubescape.json +++ b/exceptions/kubescape.json @@ -179,6 +179,9 @@ } ], "posturePolicies": [ + { + "controlID": "c-0001" + }, { "controlID": "c-0078" } diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index df48e2db8..e730e11fc 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -12,6 +12,12 @@ }, "typeTags": ["compliance"], "activeControls": [ + { + "controlID": "C-0001", + "patch": { + "name": "Forbidden Container Registries" + } + }, { "controlID": "C-0002", "patch": { diff --git a/frameworks/armobest.json b/frameworks/armobest.json index aa5c1bd55..bb2196030 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -12,6 +12,12 @@ }, "typeTags": ["compliance"], "activeControls": [ + { + "controlID": "C-0001", + "patch": { + "name": "Forbidden Container Registries" + } + }, { "controlID": "C-0002", "patch": { diff --git a/rules/rule-identify-blocklisted-image-registries-v1/raw.rego b/rules/rule-identify-blocklisted-image-registries-v1/raw.rego new file mode 100644 index 000000000..b6d018d2f --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries-v1/raw.rego @@ -0,0 +1,55 @@ +package armo_builtins + +untrustedImageRepo[msga] { + wl := input[_] + containers_path := get_containers_path(wl) + containers := object.get(wl, containers_path, []) + container := containers[i] + name := image.parse_normalized_name(container.image) + untrusted_or_public_registries(name) + path := sprintf("%s[%d].image", [concat(".", containers_path), i]) + + msga := { + "alertMessage": sprintf("image '%v' in container '%s' comes from untrusted registry", [name, container.name]), + "packagename": "armo_builtins", + "alertScore": 2, + "fixPaths": [], + "reviewPaths": [path], + "failedPaths": [path], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +untrusted_or_public_registries(image){ + # see default-config-inputs.json for list values + untrusted_registries := data.postureControlInputs.untrustedRegistries + registry := untrusted_registries[_] + startswith(image, registry) + +} + +untrusted_or_public_registries(image){ + # see default-config-inputs.json for list values + public_registries := data.postureControlInputs.publicRegistries + registry := public_registries[_] + startswith(image, registry) +} + +# get_containers_path - get resource containers paths for 
{"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} +get_containers_path(resource) := result { + resource_kinds := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} + resource_kinds[resource.kind] + result = ["spec", "template", "spec", "containers"] +} + +# get_containers_path - get resource containers paths for "Pod" +get_containers_path(resource) := result { + resource.kind == "Pod" + result = ["spec", "containers"] +} + +# get_containers_path - get resource containers paths for "CronJob" +get_containers_path(resource) := result { + resource.kind == "CronJob" + result = ["spec", "jobTemplate", "spec", "template", "spec", "containers"] +} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json b/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json new file mode 100644 index 000000000..5a3221ad4 --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries-v1/rule.metadata.json @@ -0,0 +1,67 @@ +{ + "name": "rule-identify-blocklisted-image-registries-v1", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.publicRegistries", + "settings.postureControlInputs.untrustedRegistries" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.publicRegistries", + "name": "Public registries", + "description": "Kubescape checks none of these public container registries are in use." + }, + { + "path": "settings.postureControlInputs.untrustedRegistries", + "name": "Registries block list", + "description": "Kubescape checks none of these user-provided container registries are in use." 
+ } + ], + "description": "Identifying if pod container images are from unallowed registries", + "remediation": "Use images from safe registry", + "ruleQuery": "" +} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/raw.rego b/rules/rule-identify-blocklisted-image-registries/raw.rego new file mode 100644 index 000000000..add46113a --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/raw.rego @@ -0,0 +1,98 @@ +package armo_builtins + +# Check for images from blocklisted repos + +untrustedImageRepo[msga] { + pod := input[_] + k := pod.kind + k == "Pod" + container := pod.spec.containers[i] + path := sprintf("spec.containers[%v].image", [format_int(i, 10)]) + image := container.image + untrusted_or_public_registries(image) + + msga := { + "alertMessage": sprintf("image '%v' in container '%s' comes from untrusted registry", [image, container.name]), + "packagename": "armo_builtins", + "alertScore": 2, + "fixPaths": [], + "reviewPaths": [path], + "failedPaths": [path], + "alertObject": { + "k8sApiObjects": [pod] + } + } +} + +untrustedImageRepo[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + path := sprintf("spec.template.spec.containers[%v].image", [format_int(i, 10)]) + image := container.image + untrusted_or_public_registries(image) + + msga := { + "alertMessage": sprintf("image '%v' in container '%s' comes from untrusted registry", [image, container.name]), + "packagename": "armo_builtins", + "alertScore": 2, + "fixPaths": [], + "reviewPaths": [path], + "failedPaths": [path], + "alertObject": { + "k8sApiObjects": [wl] + } + } +} + +untrustedImageRepo[msga] { + wl := input[_] + wl.kind == "CronJob" + container := wl.spec.jobTemplate.spec.template.spec.containers[i] + path := sprintf("spec.jobTemplate.spec.template.spec.containers[%v].image", [format_int(i, 10)]) + image := container.image + untrusted_or_public_registries(image) + + msga := { + "alertMessage": sprintf("image '%v' in container '%s' comes from untrusted registry", [image, container.name]), + "packagename": "armo_builtins", + "alertScore": 2, + "fixPaths": [], + "reviewPaths": [path], + "failedPaths": [path], + "alertObject": { + "k8sApiObjects": [wl] + } + } +} + +untrusted_or_public_registries(image){ + # see default-config-inputs.json for list values + untrusted_registries := data.postureControlInputs.untrustedRegistries + registry := untrusted_registries[_] + regex.match(regexify(registry), docker_host_wrapper(image)) +} + +untrusted_or_public_registries(image){ + # see default-config-inputs.json for list values + public_registries := data.postureControlInputs.publicRegistries + registry := public_registries[_] + regex.match(regexify(registry), docker_host_wrapper(image)) +} + + +# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'. +# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub. +docker_host_wrapper(image) = result { + not contains(image, "/") + result := sprintf("docker.io/%s", [image]) +} else := image + + + +# regexify - returns a registry regex to be searched only for the image host. 
+regexify(registry) := result { + endswith(registry, "/") + result = sprintf("^%s.*$", [registry]) +} else := sprintf("^%s\/.*$", [registry]) diff --git a/rules/rule-identify-blocklisted-image-registries/rule.metadata.json b/rules/rule-identify-blocklisted-image-registries/rule.metadata.json new file mode 100644 index 000000000..7537a8bc1 --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/rule.metadata.json @@ -0,0 +1,67 @@ +{ + "name": "rule-identify-blocklisted-image-registries", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.publicRegistries", + "settings.postureControlInputs.untrustedRegistries" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.publicRegistries", + "name": "Public registries", + "description": "Kubescape checks none of these public container registries are in use." + }, + { + "path": "settings.postureControlInputs.untrustedRegistries", + "name": "Registries block list", + "description": "Kubescape checks none of these user-provided container registries are in use." + } + ], + "description": "Identifying if pod container images are from unallowed registries", + "remediation": "Use images from safe registry", + "ruleQuery": "" +} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/cronjob/data.json b/rules/rule-identify-blocklisted-image-registries/test/cronjob/data.json new file mode 100644 index 000000000..2878e4e40 --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/cronjob/data.json @@ -0,0 +1,7 @@ +{ + "postureControlInputs": { + "untrustedRegistries": [ + "quay.io" + ] + } +} diff --git a/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json b/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json new file mode 100644 index 000000000..d8b342287 --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/cronjob/expected.json @@ -0,0 +1,26 @@ +[ + { + "alertMessage": "image 'quay.io/hi:latest' in container 'hello' comes from untrusted registry", + "reviewPaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].image" + ], + "failedPaths": [ + "spec.jobTemplate.spec.template.spec.containers[0].image" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/cronjob/input/cronjob.yaml b/rules/rule-identify-blocklisted-image-registries/test/cronjob/input/cronjob.yaml new file mode 100644 index 000000000..16e3f48aa --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/cronjob/input/cronjob.yaml @@ -0,0 +1,24 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + 
template: + spec: + serviceAccountName: kubernetes-dashboard + restartPolicy: OnFailure + containers: + - name: hello + image: quay.io/hi:latest + env : + - + name : pwd + value : "Hpwd" + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster diff --git a/rules/rule-identify-blocklisted-image-registries/test/pod/data.json b/rules/rule-identify-blocklisted-image-registries/test/pod/data.json new file mode 100644 index 000000000..709abae38 --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/pod/data.json @@ -0,0 +1,7 @@ +{ + "postureControlInputs": { + "publicRegistries": [ + "docker.io" + ] + } +} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/pod/expected.json b/rules/rule-identify-blocklisted-image-registries/test/pod/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/pod/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/pod/input/pod.yaml b/rules/rule-identify-blocklisted-image-registries/test/pod/input/pod.yaml new file mode 100644 index 000000000..6d777e7aa --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/pod/input/pod.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: audit-pod + labels: + app: audit-pod +spec: + containers: + - name: test-container + env : + - name : azure_batch_key + value : "Hello from the environment" + image: quay.io/http-echo:0.2.3 diff --git a/rules/rule-identify-blocklisted-image-registries/test/workloads/data.json b/rules/rule-identify-blocklisted-image-registries/test/workloads/data.json new file mode 100644 index 000000000..1ebb4b84d --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/workloads/data.json @@ -0,0 +1,7 @@ +{ + "postureControlInputs": { + "publicRegistries": [ + "registry.hub.docker.com" + ] + } +} \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json b/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json new file mode 100644 index 000000000..9e2873142 --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/workloads/expected.json @@ -0,0 +1,29 @@ +[ + { + "alertMessage": "image 'registry.hub.docker.com/php:7.0-apache' in container 'php' comes from untrusted registry", + "reviewPaths": [ + "spec.template.spec.containers[1].image" + ], + "failedPaths": [ + "spec.template.spec.containers[1].image" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 2, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "app": "goproxy" + }, + "name": "my-deployment" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/rule-identify-blocklisted-image-registries/test/workloads/input/deployment.yaml b/rules/rule-identify-blocklisted-image-registries/test/workloads/input/deployment.yaml new file mode 100644 index 000000000..b7889e636 --- /dev/null +++ b/rules/rule-identify-blocklisted-image-registries/test/workloads/input/deployment.yaml @@ -0,0 +1,40 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-deployment + labels: + app : goproxy +spec: + selector: + matchLabels: + app : goproxy + template: + metadata : + name : goproxy + labels : + app : goproxy + spec : + 
hostNetwork: true + containers : + - + name : mysql + image : mysql + securityContext: + allowPrivilegeEscalation: true + env : + - + name : MYSQL_ROOT_PASSWORD + value : "rootpasswd" + - + name : php + image : registry.hub.docker.com/php:7.0-apache + volumeMounts : + - + mountPath : /var/www/html + name : site-data + subPath : html + volumes : + - + name : site-data + persistentVolumeClaim : + claimName : my-lamp-site-data \ No newline at end of file From 0445164d693cb4e471a3e20ad919d5a87ca3f2f3 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 14:27:36 +0200 Subject: [PATCH 114/195] remove C-0001 from fws Signed-off-by: YiscahLevySilas1 --- exceptions/kubescape-prometheus.json | 6 ------ exceptions/kubescape.json | 3 --- frameworks/allcontrols.json | 6 ------ frameworks/armobest.json | 6 ------ 4 files changed, 21 deletions(-) diff --git a/exceptions/kubescape-prometheus.json b/exceptions/kubescape-prometheus.json index fe83aff47..ae712885a 100644 --- a/exceptions/kubescape-prometheus.json +++ b/exceptions/kubescape-prometheus.json @@ -53,9 +53,6 @@ } ], "posturePolicies": [ - { - "controlID": "c-0001" - }, { "controlID": "c-0078" } @@ -140,9 +137,6 @@ } ], "posturePolicies": [ - { - "controlID": "c-0001" - }, { "controlID": "c-0078" } diff --git a/exceptions/kubescape.json b/exceptions/kubescape.json index 824efe6d9..34b2187ed 100644 --- a/exceptions/kubescape.json +++ b/exceptions/kubescape.json @@ -179,9 +179,6 @@ } ], "posturePolicies": [ - { - "controlID": "c-0001" - }, { "controlID": "c-0078" } diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index e730e11fc..df48e2db8 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -12,12 +12,6 @@ }, "typeTags": ["compliance"], "activeControls": [ - { - "controlID": "C-0001", - "patch": { - "name": "Forbidden Container Registries" - } - }, { "controlID": "C-0002", "patch": { diff --git a/frameworks/armobest.json b/frameworks/armobest.json index bb2196030..aa5c1bd55 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -12,12 +12,6 @@ }, "typeTags": ["compliance"], "activeControls": [ - { - "controlID": "C-0001", - "patch": { - "name": "Forbidden Container Registries" - } - }, { "controlID": "C-0002", "patch": { From 86e9bd6f01deb9b9b961e746859b1809db468e74 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 14:30:44 +0200 Subject: [PATCH 115/195] rm toolchain Signed-off-by: YiscahLevySilas1 --- testrunner/go.mod | 2 -- 1 file changed, 2 deletions(-) diff --git a/testrunner/go.mod b/testrunner/go.mod index 555c20cbf..de259d5a9 100644 --- a/testrunner/go.mod +++ b/testrunner/go.mod @@ -2,8 +2,6 @@ module testrunner go 1.21 -toolchain go1.22.0 - require ( github.com/armosec/armoapi-go v0.0.330 github.com/golang/glog v1.1.2 From e14636f3058f2e6936913e00c873aaf1e4f155ff Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 16:02:47 +0200 Subject: [PATCH 116/195] go mod tidy Signed-off-by: YiscahLevySilas1 --- go.mod | 3 +-- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index d1895abee..68c59d1c3 100644 --- a/go.mod +++ b/go.mod @@ -63,11 +63,10 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic 
v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.4 // indirect github.com/google/uuid v1.3.0 // indirect diff --git a/go.sum b/go.sum index 8538c5faa..9d5fc8a1e 100644 --- a/go.sum +++ b/go.sum @@ -224,8 +224,7 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -274,9 +273,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= From 180eb2425a131e38d99376b9e74e455b623e0789 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 16:04:36 +0200 Subject: [PATCH 117/195] go mod tidy Signed-off-by: YiscahLevySilas1 --- testrunner/go.mod | 227 ++++------ testrunner/go.sum | 1079 ++++++++++++++++++++++++++------------------- 2 files changed, 728 insertions(+), 578 deletions(-) diff --git a/testrunner/go.mod b/testrunner/go.mod index de259d5a9..7fb8d8fef 100644 --- a/testrunner/go.mod +++ b/testrunner/go.mod @@ -1,174 +1,133 @@ module testrunner -go 1.21 +go 1.19 require ( - github.com/armosec/armoapi-go v0.0.330 - github.com/golang/glog v1.1.2 - github.com/kubescape/k8s-interface v0.0.161 - github.com/kubescape/opa-utils v0.0.277 - github.com/open-policy-agent/opa v0.61.0 - github.com/stretchr/testify v1.8.4 + github.com/armosec/armoapi-go v0.0.119 + github.com/golang/glog v1.0.0 + github.com/kubescape/k8s-interface v0.0.89 + github.com/kubescape/opa-utils v0.0.204 + github.com/open-policy-agent/opa v0.45.0 + github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v3 v3.0.1 ) -require ( - cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect - 
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect - github.com/armosec/gojay v1.2.15 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0 // indirect - github.com/aws/aws-sdk-go-v2/service/iam v1.19.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/briandowns/spinner v1.23.0 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-ini/ini v1.67.0 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/gorilla/mux v1.8.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/magiconair/properties v1.8.1 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect - github.com/olvrng/ujson v1.1.0 // indirect - github.com/pelletier/go-toml v1.2.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect - github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/afero v1.6.0 // indirect - github.com/spf13/cast v1.3.0 // indirect - github.com/spf13/jwalterweatherman v1.0.0 // indirect - github.com/spf13/viper v1.7.0 // indirect - github.com/stripe/stripe-go/v74 v74.28.0 // indirect - github.com/subosito/gotenv v1.2.0 // indirect - github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2 // indirect - github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2 // indirect - github.com/uptrace/uptrace-go v1.18.0 // indirect - go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.41.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/sync v0.5.0 // indirect - google.golang.org/genproto/googleapis/api 
v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect - gopkg.in/ini.v1 v1.51.0 // indirect -) +require github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 // indirect require ( - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/container v1.27.1 // indirect + cloud.google.com/go v0.102.1 // indirect + cloud.google.com/go/compute v1.7.0 // indirect + cloud.google.com/go/container v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.27 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect - github.com/armosec/utils-go v0.0.57 // indirect - github.com/armosec/utils-k8s-go v0.0.26 // indirect - github.com/aws/aws-sdk-go-v2 v1.19.1 // indirect - github.com/aws/aws-sdk-go-v2/config v1.18.30 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.29 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37 // indirect - github.com/aws/aws-sdk-go-v2/service/eks v1.28.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.12.14 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 // indirect - github.com/aws/smithy-go v1.13.5 // indirect + github.com/armosec/utils-go v0.0.12 // indirect + github.com/armosec/utils-k8s-go v0.0.12 // indirect + github.com/aws/aws-sdk-go-v2 v1.16.7 // indirect + github.com/aws/aws-sdk-go-v2/config v1.15.13 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.12.8 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.21.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.11 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 // indirect + github.com/aws/smithy-go v1.12.0 // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/docker/docker v25.0.1+incompatible // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/fatih/color v1.15.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + 
github.com/dimchansky/utfbom v1.1.1 // indirect + github.com/docker/docker v20.10.24+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/fatih/color v1.13.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/swag v0.19.14 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v4 v4.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect + github.com/googleapis/gax-go/v2 v2.4.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kubescape/go-logger v0.0.22 // indirect + github.com/kr/pretty v0.2.1 // indirect + github.com/kubescape/go-logger v0.0.6 // indirect github.com/kubescape/rbac-utils v0.0.20 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/pquerna/cachecontrol v0.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pquerna/cachecontrol v0.1.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yannh/kubeconform v0.6.2 - 
github.com/yashtewari/glob-intersection v0.2.0 // indirect - go.opencensus.io v0.24.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.14.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.149.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/grpc v1.61.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + github.com/yashtewari/glob-intersection v0.1.0 // indirect + go.opencensus.io v0.23.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.22.0 // indirect + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + google.golang.org/api v0.85.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73 // indirect + google.golang.org/grpc v1.49.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/api v0.29.2 // indirect - k8s.io/apimachinery v0.29.2 // indirect - k8s.io/client-go v0.29.2 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - sigs.k8s.io/controller-runtime v0.15.0 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + k8s.io/api v0.25.3 // indirect + k8s.io/apimachinery v0.25.3 // indirect + k8s.io/client-go v0.25.3 // indirect + k8s.io/klog/v2 v2.70.1 // indirect + k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect + k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect + sigs.k8s.io/controller-runtime v0.12.3 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) replace github.com/libgit2/git2go/v33 => ./git2go diff --git a/testrunner/go.sum b/testrunner/go.sum index aead3d753..60d22354e 100644 --- a/testrunner/go.sum +++ b/testrunner/go.sum @@ -7,211 +7,238 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= -cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= 
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/container v1.27.1 h1:ZfLRiFM9ddFE92SlA28rknI6YJMz5Z5huAQK+FKWxIQ= -cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0 
h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/container v1.2.0 h1:LPKlQa4XfBTWdaBSDx/KQ/v45l8FDRzSV0tDpU6e/38= +cloud.google.com/go/container v1.2.0/go.mod h1:Cj2AgMsCUfMVfbGh0Fx7u5Ah/qeC0ajLrqqGGiAdCGw= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0 h1:qtRcg5Y7jNJ4jEzPq4GpWLfTspHdNe2ZK6LjwGcjgmU= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0/go.mod h1:lPneRe3TwsoDRKY4O6YDLXHhEWrD+TIRa8XrV/3/fqw= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1 h1:6A4M8smF+y8nM/DYsLNQz9n7n2ZGaEVqfz8ZWQirQkI= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1/go.mod h1:WqyxV5S0VtXD2+2d6oPqOvyhGubCvzLCKSAKgQ004Uk= 
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 h1:1u/K2BFv0MwkG6he8RYuUcbbeK22rkoZbg4lKa/msZU= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0/go.mod h1:U5gpsREQZE6SLk1t/cFfc1eMhYAlYpEzvaYXuDfefy8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE= +github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= +github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= +github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= +github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= 
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armosec/armoapi-go v0.0.330 h1:kvyLshJ3VBqWxDO/hvlpVU1DNsrmkb5M0oStw+Uwxb8= -github.com/armosec/armoapi-go v0.0.330/go.mod h1:6VYIw1hoNU3dTXKckMHNHhzhhPTMXDHtv5AFxvG4Q+U= -github.com/armosec/gojay v1.2.15 h1:sSB2vnAvacUNkw9nzUYZKcPzhJOyk6/5LK2JCNdmoZY= -github.com/armosec/gojay v1.2.15/go.mod h1:vzVAaay2TWJAngOpxu8aqLbye9jMgoKleuAOK+xsOts= -github.com/armosec/utils-go v0.0.57 h1:0RaqexK+t7HeKWfldBv2C1JiLLGuUx9FP0DGWDNRJpg= -github.com/armosec/utils-go v0.0.57/go.mod h1:4wfINE8JTQ6EHvSL2jki0Q3/D1j6oDi6sxxrtAEug74= -github.com/armosec/utils-k8s-go v0.0.26 h1:gVSV1mrALyphaesc+JXbx9SfbxLqfgg1KvvC1/0Hfkk= -github.com/armosec/utils-k8s-go v0.0.26/go.mod h1:WL2brx3tszxeSl1yHac0oAVJUg3o22HYh1dPjaSfjXU= -github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.19.1 h1:STs0lbbpXu3byTPcnRLghs2DH0yk9qKDo27TyyJSKsM= -github.com/aws/aws-sdk-go-v2 v1.19.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2/config v1.18.30 
h1:TTAXQIn31qYFUQjkW6siVrRTX1ux+sADZDOe3jsZcMg= -github.com/aws/aws-sdk-go-v2/config v1.18.30/go.mod h1:+YogjT7e/t9JVu/sOnZZgxTge1G+bPNk8zOaI0QIQvE= -github.com/aws/aws-sdk-go-v2/credentials v1.13.29 h1:KNgCpThGuZyCjq9EuuqoLDenKKMwO/x1Xx01ckDa7VI= -github.com/aws/aws-sdk-go-v2/credentials v1.13.29/go.mod h1:VMq1LcmSEa9qxBlOCYTjVuGJWEEzhGmgL552jQsmhss= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6 h1:kortK122LvTU34CGX/F9oJpelXKkEA2j/MW48II+8+8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6/go.mod h1:k7IPHyHNIASI0m0RwOmCjWOTtgG+J0raqwuHH8WhWJE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36 h1:kbk81RlPoC6e4co7cQx2FAvH9TgbzxIqCqiosAFiB+w= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36/go.mod h1:T8Jsn/uNL/AFOXrVYQ1YQaN1r9gN34JU1855/Lyjv+o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30 h1:lMl8S5SB8jNCB+Sty2Em4lnu3IJytceHQd7qbmfqKL0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30/go.mod h1:v3GSCnFxbHzt9dlWBqvA1K1f9lmWuf4ztupZBCAIVs4= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37 h1:BXiqvN7WuV/pMhz8CivhO8cG8icJcjnjHumif4ukQ0c= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37/go.mod h1:d4GZ62cjnz/hjKFdAu11gAwK73bdhqaFv2O4J1gaqIs= -github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0 h1:5RVanD+P+L2W9WU07/8J/A52vnQi7F3ClBdWQttgYlg= -github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0/go.mod h1:9yGOFsa2OcdyePojE89xNGtdBusTyc8ocjpiuFtFc0g= -github.com/aws/aws-sdk-go-v2/service/eks v1.28.1 h1:SA+98Rnehl2KXewvGXc2Lw2ns3Y4t9jdMHmEY5hcNws= -github.com/aws/aws-sdk-go-v2/service/eks v1.28.1/go.mod h1:cQRkgJKg6s9AIzFZ+i4pXdm+/3Fw4MuPNqCdMvSaqns= -github.com/aws/aws-sdk-go-v2/service/iam v1.19.0 h1:9vCynoqC+dgxZKrsjvAniyIopsv3RZFsZ6wkQ+yxtj8= -github.com/aws/aws-sdk-go-v2/service/iam v1.19.0/go.mod h1:OyAuvpFeSVNppcSsp1hFOVQcaTRc1LE24YIR7pMbbAA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30 h1:UcVZxLVNY4yayCmiG94Ge3l2qbc5WEB/oa4RmjoQEi0= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30/go.mod h1:wPffyJiWWtHwvpFyn23WjAjVjMnlQOQrl02+vutBh3Y= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.14 h1:gUjz7trfz9qBm0AlkKTvJHBXELi1wvw+2LA9GfD2AsM= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.14/go.mod h1:9kfRdJgLCbnyeqZ/DpaSwcgj9ZDYLfRpe8Sze+NrYfQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14 h1:8bEtxV5UT9ucdWGXfZ7CM3caQhSHGjWnTHt0OeF7m7s= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14/go.mod h1:nd9BG2UnexN2sDx/mk2Jd6pf3d2E61AiA8m8Fdvdx8Y= -github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 h1:U7h9CPoyMfVoN5jUglB0LglCMP10AK4vMBsbsCKM8Yw= -github.com/aws/aws-sdk-go-v2/service/sts v1.20.1/go.mod h1:BUHusg4cOA1TFGegj7x8/eoWrbdHzJfoMrXcbMQAG0k= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/armosec/armoapi-go v0.0.119 h1:7XbvBbOKp26Bpp72LQ8Spw4FBpbXu3+qZFQyPEwTPFk= +github.com/armosec/armoapi-go v0.0.119/go.mod h1:2zoNzb3Fy9ZByeczJZ47ftDRLRzTykVdTISS3GTc/JU= +github.com/armosec/utils-go v0.0.12 h1:NXkG/BhbSVAmTVXr0qqsK02CmxEiXuJyPmdTRcZ4jAo= +github.com/armosec/utils-go v0.0.12/go.mod h1:F/K1mI/qcj7fNuJl7xktoCeHM83azOF0Zq6eC2WuPyU= +github.com/armosec/utils-k8s-go v0.0.12 h1:u7kHSUp4PpvPP3hEaRXMbM0Vw23IyLhAzzE+2TW6Jkk= 
+github.com/armosec/utils-k8s-go v0.0.12/go.mod h1:rPHiOaHefWa9ujspwvYYAp0uEbqGGyAMiNrFa/Gpp/8= +github.com/aws/aws-sdk-go-v2 v1.16.7 h1:zfBwXus3u14OszRxGcqCDS4MfMCv10e8SMJ2r8Xm0Ns= +github.com/aws/aws-sdk-go-v2 v1.16.7/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw= +github.com/aws/aws-sdk-go-v2/config v1.15.13 h1:CJH9zn/Enst7lDiGpoguVt0lZr5HcpNVlRJWbJ6qreo= +github.com/aws/aws-sdk-go-v2/config v1.15.13/go.mod h1:AcMu50uhV6wMBUlURnEXhr9b3fX6FLSTlEV89krTEGk= +github.com/aws/aws-sdk-go-v2/credentials v1.12.8 h1:niTa7zc7uyOP2ufri0jPESBt1h9yP3Zc0q+xzih3h8o= +github.com/aws/aws-sdk-go-v2/credentials v1.12.8/go.mod h1:P2Hd4Sy7mXRxPNcQMPBmqszSJoDXexX8XEDaT6lucO0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8 h1:VfBdn2AxwMbFyJN/lF/xuT3SakomJ86PZu3rCxb5K0s= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8/go.mod h1:oL1Q3KuCq1D4NykQnIvtRiBGLUXhcpY5pl6QZB2XEPU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 h1:2C0pYHcUBmdzPj+EKNC4qj97oK6yjrUhc1KoSodglvk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14/go.mod h1:kdjrMwHwrC3+FsKhNcCMJ7tUVj/8uSD5CZXeQ4wV6fM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 h1:2J+jdlBJWEmTyAwC82Ym68xCykIvnSnIN18b8xHGlcc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8/go.mod h1:ZIV8GYoC6WLBW5KGs+o4rsc65/ozd+eQ0L31XF5VDwk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15 h1:QquxR7NH3ULBsKC+NoTpilzbKKS+5AELfNREInbhvas= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15/go.mod h1:Tkrthp/0sNBShQQsamR7j/zY4p19tVTAs+nnqhH6R3c= +github.com/aws/aws-sdk-go-v2/service/eks v1.21.4 h1:qmKWieiIiYwD46GRD6nxFc1KsyR0ChGRid8emb7rDEY= +github.com/aws/aws-sdk-go-v2/service/eks v1.21.4/go.mod h1:Th2+t6mwi0bZayXUOFOTuyWR2nwRUVcadDy4WGE8C2E= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 h1:oKnAXxSF2FUvfgw8uzU/v9OTYorJJZ8eBmWhr9TWVVQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8/go.mod h1:rDVhIMAX9N2r8nWxDUlbubvvaFMnfsm+3jAV7q+rpM4= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.11 h1:XOJWXNFXJyapJqQuCIPfftsOf0XZZioM0kK6OPRt9MY= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.11/go.mod h1:MO4qguFjs3wPGcCSpQ7kOFTwRvb+eu+fn+1vKleGHUk= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 h1:yOfILxyjmtr2ubRkRJldlHDFBhf5vw4CzhbwWIBmimQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.9/go.mod h1:O1IvkYxr+39hRf960Us6j0x1P8pDqhTX+oXM5kQNl/Y= +github.com/aws/smithy-go v1.12.0 h1:gXpeZel/jPoWQ7OEmLIgCUnhkFftqNfwWUwAHSlp1v0= +github.com/aws/smithy-go v1.12.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= -github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= github.com/buger/jsonparser 
v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= -github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/bytecodealliance/wasmtime-go v1.0.0 h1:9u9gqaUiaJeN5IoD1L7egD8atOnTGyJcNp8BhkL9cUU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= -github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.1+incompatible h1:k5TYd5rIVQRSqcTwCID+cyVA0yRg86+Pcrz1ls0/frA= -github.com/docker/docker v25.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane 
v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= -github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897 h1:E52jfcE64UG42SwLmrW0QByONfGynWuzBvm86BoB9z8= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-ini/ini 
v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= -github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= 
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -219,96 +246,101 @@ github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200j github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid 
v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod 
h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -316,70 +348,51 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= 
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubescape/go-logger v0.0.22 h1:gle7wH6emOiGv9ljdpVi82pWLQ3jGucrUucvil6JXHE= -github.com/kubescape/go-logger v0.0.22/go.mod h1:x3HBpZo3cMT/WIdy18BxvVVd5D0e/PWFVk/HiwBNu3g= -github.com/kubescape/k8s-interface v0.0.161 h1:v6b3/kmA4o/2niNrejrbXj5X9MLfH0UrpI3s+e/fdwc= -github.com/kubescape/k8s-interface v0.0.161/go.mod h1:oF+Yxug3Kpfu9Yr2j63wy7gwswrKXpiqI0mLk/7gF/s= -github.com/kubescape/opa-utils v0.0.277 h1:nlzhvHZE0mAQ6YTtNgod4nI0wKwL9/7yCynobbKn2go= -github.com/kubescape/opa-utils v0.0.277/go.mod h1:N/UnbZHpoiHQH7O50yadhIXZvVl0IVtTGBmePPrSQSg= +github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA= +github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI= +github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU= +github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0= +github.com/kubescape/opa-utils v0.0.204 h1:9O9drjyzjOhI7Xi2S4Px0WKa66U5GFPQqeOLvhDqHnw= +github.com/kubescape/opa-utils v0.0.204/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg= github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw= github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.7 
h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= -github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -390,71 +403,43 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olvrng/ujson v1.1.0 h1:8xVUzVlqwdMVWh5d1UHBtLQ1D50nxoPuPEq9Wozs8oA= -github.com/olvrng/ujson v1.1.0/go.mod h1:Mz4G3RODTUfbkKyvi0lgmPx/7vd3Saksk+1jgk8s9xo= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/open-policy-agent/opa v0.61.0 h1:nhncQ2CAYtQTV/SMBhDDPsCpCQsUW+zO/1j+T5V7oZg= -github.com/open-policy-agent/opa v0.61.0/go.mod h1:7OUuzJnsS9yHf8lw0ApfcbrnaRG1EkN3J2fuuqi4G/E= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= +github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs= +github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= -github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 h1:lEOLY2vyGIqKWUI9nzsOJRV3mb3WC9dXYORsLEUcoeY= github.com/santhosh-tekuri/jsonschema/v5 v5.1.1/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -478,134 +463,83 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1l github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod 
h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stripe/stripe-go/v74 v74.28.0 h1:ItzPPy+cjMKbR3Oihknt/8dv6PANp3hTThUGZjhF9lc= -github.com/stripe/stripe-go/v74 v74.28.0/go.mod h1:f9L6LvaXa35ja7eyvP6GQswoaIPaBRvGAimAO+udbBw= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2 h1:CNznWHkrbA6o1q2H/BsH4tIHf4zbKNtndeoV+AH8z0U= -github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2/go.mod h1:7YSrHCmYPHIXjTWnKSU7EGT0TFEcm3WwSeQquwCGg38= -github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2 h1:uyrW06oJi4iWvhjPLVfk4qrSP2Zm0AMozKKDmp6i4pE= -github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2/go.mod h1:PMAs2dNxP55lgt6xu0if+Jasm6s+Xpmqn6ev1NyDfnI= -github.com/uptrace/uptrace-go v1.18.0 h1:RY15qy19C0irbe2UCxQbjenk8WyUdvUV756R9ZpqCGI= -github.com/uptrace/uptrace-go v1.18.0/go.mod h1:BUW3sFgEyRmZIxts4cv6TGaJnWAW95uW78GIiSdChOQ= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yannh/kubeconform v0.6.2 h1:xjUxiCcqTBofTsM3UT6fNb/tKRfqjakNfWvHRa3sGOo= github.com/yannh/kubeconform v0.6.2/go.mod h1:4E6oaL+lh7KgCG2SaOabeeAFBkyKu5D9ab0OEekGcbs= -github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= -github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg= +github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0 h1:TXu20nL4yYfJlQeqG/D3Ia6b0p2HZmLfJto9hqJTQ/c= -go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0/go.mod h1:tQ5gBnfjndV1su3+DiLuu6rnd9hBBzg4rkRILnjSNFg= -go.opentelemetry.io/otel v1.21.0 
h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0 h1:k0k7hFNDd8K4iOMJXj7s8sHaC4mhTlAeppRmZXLgZ6k= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0 h1:HgbDTD8pioFdY3NRc/YCvsWjqQPtweGyXxa32LgnTOw= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0/go.mod h1:tmvt/yK5Es5d6lHYWerLSOna8lCEfrBVX/a9M0ggqss= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 h1:hSWWvDjXHVLq9DkmB+77fl8v7+t+yYiS+eNkiplDK54= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0/go.mod h1:zG7KQql1WjZCaUJd+L/ReSYx4bjbYJxg5ws9ws+mYes= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk/metric v0.41.0 h1:c3sAt9/pQ5fSIUfl0gPtClV3HhE18DCVzByD33R/zsk= -go.opentelemetry.io/otel/sdk/metric v0.41.0/go.mod h1:PmOmSt+iOklKtIg5O4Vz9H/ttcRFSNTgii+E1KGyn1w= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0 
h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -616,22 +550,27 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -641,21 +580,66 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.19.0 
h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= -golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw= +golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -663,18 +647,15 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -683,37 +664,82 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -722,7 +748,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -732,17 +757,51 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -751,16 +810,52 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= -google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0 h1:8rJoHuRxx+vCmZtAO/3k1dRLvYNVyTJtZ5oaFZvhgvc= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 
h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -774,13 +869,79 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 
h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73 h1:sdZWfcGN37Dv0QWIhuasQGMzAQJOL2oqnvot4/kPgfQ= +google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -789,10 +950,35 @@ google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -801,37 +987,37 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -839,26 +1025,31 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= -k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= -k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= -k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= -k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= +k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI= +k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= +k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= +k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= +k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/controller-runtime v0.15.0 
h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= -sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= +sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= From eb2f51bb49df0d9bd11c1a64da2f179c88bd4b9e Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 16:16:04 +0200 Subject: [PATCH 118/195] add related resources Signed-off-by: YiscahLevySilas1 --- .../raw.rego | 8 +- .../test/fail-wl-creates-pod/expected.json | 49 ++ .../test/fail-wl-gets-secrets/expected.json | 50 ++ testrunner/go.mod | 201 ++--- testrunner/go.sum | 689 +++++++----------- 5 files changed, 494 insertions(+), 503 deletions(-) diff --git a/rules/workload-with-cluster-takeover-roles/raw.rego b/rules/workload-with-cluster-takeover-roles/raw.rego index 29ccdb5e2..4111702cc 100644 --- a/rules/workload-with-cluster-takeover-roles/raw.rego +++ b/rules/workload-with-cluster-takeover-roles/raw.rego @@ -38,10 +38,16 @@ deny[msga] { "k8sApiObjects": [wl] }, "relatedObjects": [{ + "object": sa, + }, + { "object": rolebinding, "reviewPaths": [reviewPath], "deletePaths": [deletePath], - }] + }, + { + "object": role, + },] } } diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json index a8d9266e3..1202efe09 100644 --- a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json @@ -20,6 +20,29 @@ ] }, "relatedObjects": [ + { + "object": { + "apiVersion": "v1", + "automountServiceAccountToken": true, + "kind": "ServiceAccount", + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + 
} + ] + }, + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null + }, { "object": { "apiVersion": "rbac.authorization.k8s.io/v1", @@ -53,6 +76,32 @@ "subjects[1]" ], "fixPaths": null + }, + { + "object": { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": { + "name": "test" + }, + "rules": [ + { + "apiGroups": [ + "" + ], + "resources": [ + "pods" + ], + "verbs": [ + "create" + ] + } + ] + }, + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null } ] } diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json index a93d443cd..968a65fff 100644 --- a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json @@ -20,6 +20,29 @@ ] }, "relatedObjects": [ + { + "object": { + "apiVersion": "v1", + "automountServiceAccountToken": true, + "kind": "ServiceAccount", + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] + }, + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null + }, { "object": { "apiVersion": "rbac.authorization.k8s.io/v1", @@ -53,6 +76,33 @@ "subjects[0]" ], "fixPaths": null + }, + { + "object": { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": { + "name": "test" + }, + "rules": [ + { + "apiGroups": [ + "*" + ], + "resources": [ + "secrets", + "users" + ], + "verbs": [ + "get" + ] + } + ] + }, + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null } ] } diff --git a/testrunner/go.mod b/testrunner/go.mod index 7fb8d8fef..befc96f80 100644 --- a/testrunner/go.mod +++ b/testrunner/go.mod @@ -3,130 +3,163 @@ module testrunner go 1.19 require ( - github.com/armosec/armoapi-go v0.0.119 - github.com/golang/glog v1.0.0 - github.com/kubescape/k8s-interface v0.0.89 - github.com/kubescape/opa-utils v0.0.204 - github.com/open-policy-agent/opa v0.45.0 - github.com/stretchr/testify v1.8.0 + github.com/armosec/armoapi-go v0.0.256 + github.com/golang/glog v1.1.1 + github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847 + github.com/kubescape/opa-utils v0.0.272 + github.com/open-policy-agent/opa v0.55.0 + github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v3 v3.0.1 ) -require github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 // indirect +require ( + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect + github.com/armosec/gojay v1.2.15 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0 // indirect + github.com/aws/aws-sdk-go-v2/service/iam 
v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/s2a-go v0.1.4 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/stripe/stripe-go/v74 v74.28.0 // indirect + github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2 // indirect + github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2 // indirect + github.com/uptrace/uptrace-go v1.16.0 // indirect + go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/sdk v1.16.0 // indirect + go.opentelemetry.io/otel/sdk/metric v0.39.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.opentelemetry.io/proto/otlp v0.19.0 // indirect + golang.org/x/exp v0.0.0-20230728194245-b0cb94b80691 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect +) require ( - cloud.google.com/go v0.102.1 // indirect - cloud.google.com/go/compute v1.7.0 // indirect - cloud.google.com/go/container v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.27 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect + cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go/container v1.24.0 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect - 
github.com/armosec/utils-go v0.0.12 // indirect - github.com/armosec/utils-k8s-go v0.0.12 // indirect - github.com/aws/aws-sdk-go-v2 v1.16.7 // indirect - github.com/aws/aws-sdk-go-v2/config v1.15.13 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.12.8 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15 // indirect - github.com/aws/aws-sdk-go-v2/service/eks v1.21.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.11 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 // indirect - github.com/aws/smithy-go v1.12.0 // indirect + github.com/armosec/utils-go v0.0.20 // indirect + github.com/armosec/utils-k8s-go v0.0.16 // indirect + github.com/aws/aws-sdk-go-v2 v1.19.1 // indirect + github.com/aws/aws-sdk-go-v2/config v1.18.30 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.29 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.28.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.14 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 // indirect + github.com/aws/smithy-go v1.13.5 // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/docker v20.10.24+incompatible // indirect + github.com/docker/docker v24.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.8 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect - 
github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kr/pretty v0.2.1 // indirect - github.com/kubescape/go-logger v0.0.6 // indirect + github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525 // indirect github.com/kubescape/rbac-utils v0.0.20 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect + github.com/opencontainers/image-spec v1.1.0-rc4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/pquerna/cachecontrol v0.1.0 // indirect + github.com/pquerna/cachecontrol v0.2.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yannh/kubeconform v0.6.2 - github.com/yashtewari/glob-intersection v0.1.0 // indirect - go.opencensus.io v0.23.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.22.0 // indirect - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - google.golang.org/api v0.85.0 // indirect + github.com/yashtewari/glob-intersection v0.2.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/crypto v0.11.0 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/term v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/api v0.126.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73 // indirect - google.golang.org/grpc v1.49.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/grpc v1.56.2 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 
v0.9.1 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/api v0.25.3 // indirect - k8s.io/apimachinery v0.25.3 // indirect - k8s.io/client-go v0.25.3 // indirect - k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect - sigs.k8s.io/controller-runtime v0.12.3 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + k8s.io/api v0.27.4 // indirect + k8s.io/apimachinery v0.27.4 // indirect + k8s.io/client-go v0.27.4 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/controller-runtime v0.15.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/testrunner/go.sum b/testrunner/go.sum index 60d22354e..1261d01a4 100644 --- a/testrunner/go.sum +++ b/testrunner/go.sum @@ -15,41 +15,21 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod 
h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/container v1.2.0 h1:LPKlQa4XfBTWdaBSDx/KQ/v45l8FDRzSV0tDpU6e/38= -cloud.google.com/go/container v1.2.0/go.mod h1:Cj2AgMsCUfMVfbGh0Fx7u5Ah/qeC0ajLrqqGGiAdCGw= +cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/container v1.24.0 h1:N51t/cgQJFqDD/W7Mb+IvmAPHrf8AbPx7Bb7aF4lROE= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -59,108 +39,105 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE= -github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= 
-github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= -github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= -github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0 h1:qtRcg5Y7jNJ4jEzPq4GpWLfTspHdNe2ZK6LjwGcjgmU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0/go.mod h1:lPneRe3TwsoDRKY4O6YDLXHhEWrD+TIRa8XrV/3/fqw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1 h1:6A4M8smF+y8nM/DYsLNQz9n7n2ZGaEVqfz8ZWQirQkI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.1.1/go.mod 
h1:WqyxV5S0VtXD2+2d6oPqOvyhGubCvzLCKSAKgQ004Uk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 h1:1u/K2BFv0MwkG6he8RYuUcbbeK22rkoZbg4lKa/msZU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0/go.mod h1:U5gpsREQZE6SLk1t/cFfc1eMhYAlYpEzvaYXuDfefy8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/armosec/armoapi-go v0.0.119 h1:7XbvBbOKp26Bpp72LQ8Spw4FBpbXu3+qZFQyPEwTPFk= -github.com/armosec/armoapi-go v0.0.119/go.mod h1:2zoNzb3Fy9ZByeczJZ47ftDRLRzTykVdTISS3GTc/JU= -github.com/armosec/utils-go v0.0.12 h1:NXkG/BhbSVAmTVXr0qqsK02CmxEiXuJyPmdTRcZ4jAo= -github.com/armosec/utils-go v0.0.12/go.mod h1:F/K1mI/qcj7fNuJl7xktoCeHM83azOF0Zq6eC2WuPyU= -github.com/armosec/utils-k8s-go v0.0.12 h1:u7kHSUp4PpvPP3hEaRXMbM0Vw23IyLhAzzE+2TW6Jkk= -github.com/armosec/utils-k8s-go v0.0.12/go.mod h1:rPHiOaHefWa9ujspwvYYAp0uEbqGGyAMiNrFa/Gpp/8= -github.com/aws/aws-sdk-go-v2 v1.16.7 h1:zfBwXus3u14OszRxGcqCDS4MfMCv10e8SMJ2r8Xm0Ns= -github.com/aws/aws-sdk-go-v2 v1.16.7/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw= -github.com/aws/aws-sdk-go-v2/config v1.15.13 h1:CJH9zn/Enst7lDiGpoguVt0lZr5HcpNVlRJWbJ6qreo= -github.com/aws/aws-sdk-go-v2/config v1.15.13/go.mod h1:AcMu50uhV6wMBUlURnEXhr9b3fX6FLSTlEV89krTEGk= -github.com/aws/aws-sdk-go-v2/credentials v1.12.8 h1:niTa7zc7uyOP2ufri0jPESBt1h9yP3Zc0q+xzih3h8o= -github.com/aws/aws-sdk-go-v2/credentials v1.12.8/go.mod h1:P2Hd4Sy7mXRxPNcQMPBmqszSJoDXexX8XEDaT6lucO0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8 
h1:VfBdn2AxwMbFyJN/lF/xuT3SakomJ86PZu3rCxb5K0s= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8/go.mod h1:oL1Q3KuCq1D4NykQnIvtRiBGLUXhcpY5pl6QZB2XEPU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 h1:2C0pYHcUBmdzPj+EKNC4qj97oK6yjrUhc1KoSodglvk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14/go.mod h1:kdjrMwHwrC3+FsKhNcCMJ7tUVj/8uSD5CZXeQ4wV6fM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 h1:2J+jdlBJWEmTyAwC82Ym68xCykIvnSnIN18b8xHGlcc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8/go.mod h1:ZIV8GYoC6WLBW5KGs+o4rsc65/ozd+eQ0L31XF5VDwk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15 h1:QquxR7NH3ULBsKC+NoTpilzbKKS+5AELfNREInbhvas= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15/go.mod h1:Tkrthp/0sNBShQQsamR7j/zY4p19tVTAs+nnqhH6R3c= -github.com/aws/aws-sdk-go-v2/service/eks v1.21.4 h1:qmKWieiIiYwD46GRD6nxFc1KsyR0ChGRid8emb7rDEY= -github.com/aws/aws-sdk-go-v2/service/eks v1.21.4/go.mod h1:Th2+t6mwi0bZayXUOFOTuyWR2nwRUVcadDy4WGE8C2E= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 h1:oKnAXxSF2FUvfgw8uzU/v9OTYorJJZ8eBmWhr9TWVVQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8/go.mod h1:rDVhIMAX9N2r8nWxDUlbubvvaFMnfsm+3jAV7q+rpM4= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.11 h1:XOJWXNFXJyapJqQuCIPfftsOf0XZZioM0kK6OPRt9MY= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.11/go.mod h1:MO4qguFjs3wPGcCSpQ7kOFTwRvb+eu+fn+1vKleGHUk= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 h1:yOfILxyjmtr2ubRkRJldlHDFBhf5vw4CzhbwWIBmimQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.9/go.mod h1:O1IvkYxr+39hRf960Us6j0x1P8pDqhTX+oXM5kQNl/Y= -github.com/aws/smithy-go v1.12.0 h1:gXpeZel/jPoWQ7OEmLIgCUnhkFftqNfwWUwAHSlp1v0= -github.com/aws/smithy-go v1.12.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/armosec/armoapi-go v0.0.256 h1:eV8WWQ1r+2D0KHhLA6ux6lx67+uqkYe/uVHrOUFqz5c= +github.com/armosec/armoapi-go v0.0.256/go.mod h1:CJT5iH5VF30zjdQYXaQhsAm8IEHtM1T87HcFVXeLX54= +github.com/armosec/gojay v1.2.15 h1:sSB2vnAvacUNkw9nzUYZKcPzhJOyk6/5LK2JCNdmoZY= +github.com/armosec/gojay v1.2.15/go.mod h1:vzVAaay2TWJAngOpxu8aqLbye9jMgoKleuAOK+xsOts= +github.com/armosec/utils-go v0.0.20 h1:bvr+TMumEYdMsGFGSsaQysST7K02nNROFvuajNuKPlw= +github.com/armosec/utils-go v0.0.20/go.mod h1:ZEFiSv8KpTFNT19jHis1IengiF/BGDvg7tHmXo+cwxs= +github.com/armosec/utils-k8s-go v0.0.16 h1:h46PoxAb4OHA2p719PzcAS03lADw4lH4TyRMaZ3ix/g= +github.com/armosec/utils-k8s-go v0.0.16/go.mod h1:QX0QAGlH7KCZq810eO9QjTYqkhjw8cvrr96TZfaUGrk= +github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.19.1 h1:STs0lbbpXu3byTPcnRLghs2DH0yk9qKDo27TyyJSKsM= +github.com/aws/aws-sdk-go-v2 v1.19.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/config v1.18.30 h1:TTAXQIn31qYFUQjkW6siVrRTX1ux+sADZDOe3jsZcMg= +github.com/aws/aws-sdk-go-v2/config v1.18.30/go.mod h1:+YogjT7e/t9JVu/sOnZZgxTge1G+bPNk8zOaI0QIQvE= +github.com/aws/aws-sdk-go-v2/credentials v1.13.29 h1:KNgCpThGuZyCjq9EuuqoLDenKKMwO/x1Xx01ckDa7VI= +github.com/aws/aws-sdk-go-v2/credentials v1.13.29/go.mod h1:VMq1LcmSEa9qxBlOCYTjVuGJWEEzhGmgL552jQsmhss= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6 h1:kortK122LvTU34CGX/F9oJpelXKkEA2j/MW48II+8+8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6/go.mod h1:k7IPHyHNIASI0m0RwOmCjWOTtgG+J0raqwuHH8WhWJE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= 
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36 h1:kbk81RlPoC6e4co7cQx2FAvH9TgbzxIqCqiosAFiB+w= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36/go.mod h1:T8Jsn/uNL/AFOXrVYQ1YQaN1r9gN34JU1855/Lyjv+o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30 h1:lMl8S5SB8jNCB+Sty2Em4lnu3IJytceHQd7qbmfqKL0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30/go.mod h1:v3GSCnFxbHzt9dlWBqvA1K1f9lmWuf4ztupZBCAIVs4= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37 h1:BXiqvN7WuV/pMhz8CivhO8cG8icJcjnjHumif4ukQ0c= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37/go.mod h1:d4GZ62cjnz/hjKFdAu11gAwK73bdhqaFv2O4J1gaqIs= +github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0 h1:5RVanD+P+L2W9WU07/8J/A52vnQi7F3ClBdWQttgYlg= +github.com/aws/aws-sdk-go-v2/service/ecr v1.18.0/go.mod h1:9yGOFsa2OcdyePojE89xNGtdBusTyc8ocjpiuFtFc0g= +github.com/aws/aws-sdk-go-v2/service/eks v1.28.1 h1:SA+98Rnehl2KXewvGXc2Lw2ns3Y4t9jdMHmEY5hcNws= +github.com/aws/aws-sdk-go-v2/service/eks v1.28.1/go.mod h1:cQRkgJKg6s9AIzFZ+i4pXdm+/3Fw4MuPNqCdMvSaqns= +github.com/aws/aws-sdk-go-v2/service/iam v1.19.0 h1:9vCynoqC+dgxZKrsjvAniyIopsv3RZFsZ6wkQ+yxtj8= +github.com/aws/aws-sdk-go-v2/service/iam v1.19.0/go.mod h1:OyAuvpFeSVNppcSsp1hFOVQcaTRc1LE24YIR7pMbbAA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30 h1:UcVZxLVNY4yayCmiG94Ge3l2qbc5WEB/oa4RmjoQEi0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30/go.mod h1:wPffyJiWWtHwvpFyn23WjAjVjMnlQOQrl02+vutBh3Y= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.14 h1:gUjz7trfz9qBm0AlkKTvJHBXELi1wvw+2LA9GfD2AsM= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.14/go.mod h1:9kfRdJgLCbnyeqZ/DpaSwcgj9ZDYLfRpe8Sze+NrYfQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14 h1:8bEtxV5UT9ucdWGXfZ7CM3caQhSHGjWnTHt0OeF7m7s= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14/go.mod h1:nd9BG2UnexN2sDx/mk2Jd6pf3d2E61AiA8m8Fdvdx8Y= +github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 h1:U7h9CPoyMfVoN5jUglB0LglCMP10AK4vMBsbsCKM8Yw= +github.com/aws/aws-sdk-go-v2/service/sts v1.20.1/go.mod h1:BUHusg4cOA1TFGegj7x8/eoWrbdHzJfoMrXcbMQAG0k= +github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bytecodealliance/wasmtime-go v1.0.0 h1:9u9gqaUiaJeN5IoD1L7egD8atOnTGyJcNp8BhkL9cUU= +github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 
h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -169,43 +146,39 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= -github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= -github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/docker/docker 
v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= +github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897 h1:E52jfcE64UG42SwLmrW0QByONfGynWuzBvm86BoB9z8= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= github.com/francoispqt/gojay v1.2.13 
h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -213,30 +186,33 @@ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/gobwas/glob 
v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= +github.com/golang/glog v1.1.1/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -250,8 +226,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -267,10 +241,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree 
v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -284,14 +257,12 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -299,8 +270,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -308,39 +277,33 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof 
v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod 
h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -357,42 +320,40 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA= -github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI= -github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU= -github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0= -github.com/kubescape/opa-utils v0.0.204 h1:9O9drjyzjOhI7Xi2S4Px0WKa66U5GFPQqeOLvhDqHnw= -github.com/kubescape/opa-utils v0.0.204/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg= +github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525 h1:9wzR38LebiA58cGxRBnsF78k4eJGnk7UetoTPKkyz2A= +github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525/go.mod h1:Al+yTE+vemECb/Myn2G9+2o2uFmMtphbkQmxf4OEHxE= +github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847 h1:GGuS6pE6KGa5q7j9fkRN3p1eQw16/jLUMnPR8FT3O6M= +github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847/go.mod h1:eBd6few7RYplnNNlHoe6d7jMmoE6Kx1emapJ91euBbY= +github.com/kubescape/opa-utils v0.0.272 h1:hqEuYGf/B2HuqbdVUtSsUGJopfXbQOgl3+KvFAu2Gd8= +github.com/kubescape/opa-utils v0.0.272/go.mod h1:VmplJnkhei6mDna+6z183k/HX6GOPgsXiwIlDW8mhKw= github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw= github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ= 
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -405,38 +366,42 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= 
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= -github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs= -github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI= +github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= +github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= +github.com/open-policy-agent/opa v0.55.0 h1:s7Vm4ph6zDqqP/KzvUSw9fsKVsm9lhbTZhYGxxTK7mo= +github.com/open-policy-agent/opa v0.55.0/go.mod h1:2Vh8fj/bXCqSwGMbBiHGrw+O8yrho6T/fdaHt5ROmaQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= -github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= +github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= 
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 h1:lEOLY2vyGIqKWUI9nzsOJRV3mb3WC9dXYORsLEUcoeY= github.com/santhosh-tekuri/jsonschema/v5 v5.1.1/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= @@ -463,16 +428,17 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1l github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -480,11 +446,21 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stripe/stripe-go/v74 v74.28.0 h1:ItzPPy+cjMKbR3Oihknt/8dv6PANp3hTThUGZjhF9lc= +github.com/stripe/stripe-go/v74 v74.28.0/go.mod h1:f9L6LvaXa35ja7eyvP6GQswoaIPaBRvGAimAO+udbBw= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2 h1:CNznWHkrbA6o1q2H/BsH4tIHf4zbKNtndeoV+AH8z0U= +github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2/go.mod h1:7YSrHCmYPHIXjTWnKSU7EGT0TFEcm3WwSeQquwCGg38= +github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2 h1:uyrW06oJi4iWvhjPLVfk4qrSP2Zm0AMozKKDmp6i4pE= +github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2/go.mod h1:PMAs2dNxP55lgt6xu0if+Jasm6s+Xpmqn6ev1NyDfnI= +github.com/uptrace/uptrace-go v1.16.0 h1:yB9vt1hBYYoXWExNx0okubLOjd339d7lH+/5o+Lp+MY= +github.com/uptrace/uptrace-go v1.16.0/go.mod h1:Ssc5wLpoL+9V0qkT5FtrIiru9SY4xb7q1UVLjSpxpCg= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= @@ -493,30 +469,56 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/yannh/kubeconform v0.6.2 h1:xjUxiCcqTBofTsM3UT6fNb/tKRfqjakNfWvHRa3sGOo= github.com/yannh/kubeconform v0.6.2/go.mod h1:4E6oaL+lh7KgCG2SaOabeeAFBkyKu5D9ab0OEekGcbs= -github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg= -github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0 h1:EbmAUG9hEAMXyfWEasIt2kmh/WmXUznUksChApTgBGc= +go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0/go.mod h1:rD9feqRYP24P14t5kmhNMqsqm1jvKmpx2H2rKVw52V8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 h1:f6BwB2OACc3FCbYVznctQ9V6KK7Vq6CjmYXJ7DeSs4E= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0/go.mod h1:UqL5mZ3qs6XYhDnZaW1Ps4upD+PX6LipH40AoeuIlwU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0 h1:rm+Fizi7lTM2UefJ1TO347fSRcwmIsUAaZmYmIGBRAo= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0/go.mod h1:sWFbI3jJ+6JdjOVepA5blpv/TJ20Hw+26561iMbWcwU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 h1:+XWJd3jf75RXJq29mxbuXhCXFDG3S3R4vBUeSI2P7tE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0/go.mod h1:hqgzBPTf4yONMFgdZvL/bK42R/iinTyVQtiWihs3SZc= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= +go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= 
+go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -527,9 +529,9 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -540,6 +542,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230728194245-b0cb94b80691 h1:/yRP+0AN7mf5DkD3BAI6TOFnd51gEoDEb8o35jIFtgw= +golang.org/x/exp v0.0.0-20230728194245-b0cb94b80691/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -553,8 +557,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -563,9 +565,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -582,7 +582,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -598,24 +597,14 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R 
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -623,23 +612,9 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw= -golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -650,9 +625,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -669,7 +643,6 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -682,64 +655,40 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -783,25 +732,13 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -821,31 +758,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0 h1:8rJoHuRxx+vCmZtAO/3k1dRLvYNVyTJtZ5oaFZvhgvc= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -890,58 +804,14 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto 
v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73 h1:sdZWfcGN37Dv0QWIhuasQGMzAQJOL2oqnvot4/kPgfQ= -google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api 
v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -957,28 +827,14 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -992,20 +848,18 @@ google.golang.org/protobuf 
v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1017,7 +871,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1027,28 +881,27 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= -k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI= -k8s.io/apimachinery v0.25.3 
h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= -k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= -k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= -k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/api v0.27.4 h1:0pCo/AN9hONazBKlNUdhQymmnfLRbSZjd5H5H3f0bSs= +k8s.io/api v0.27.4/go.mod h1:O3smaaX15NfxjzILfiln1D8Z3+gEYpjEpiNA/1EVK1Y= +k8s.io/apimachinery v0.27.4 h1:CdxflD4AF61yewuid0fLl6bM4a3q04jWel0IlP+aYjs= +k8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/client-go v0.27.4 h1:vj2YTtSJ6J4KxaC88P4pMPEQECWMY8gqPqsTgUKzvjk= +k8s.io/client-go v0.27.4/go.mod h1:ragcly7lUlN0SRPk5/ZkGnDjPknzb37TICq07WhI6Xc= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= +sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sourcegraph.com/sourcegraph/go-diff 
v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= From 83a4384f5c1c5a06b3ebbeb80f8c5ac02d950422 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 16:17:39 +0200 Subject: [PATCH 119/195] update base score Signed-off-by: YiscahLevySilas1 --- controls/C-0267-workloadwithclustertakeoverroles.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controls/C-0267-workloadwithclustertakeoverroles.json b/controls/C-0267-workloadwithclustertakeoverroles.json index 6db278347..0376ae36a 100644 --- a/controls/C-0267-workloadwithclustertakeoverroles.json +++ b/controls/C-0267-workloadwithclustertakeoverroles.json @@ -9,7 +9,7 @@ "long_description": "In Kubernetes, workloads with overly permissive roles pose a significant security risk. When a workload is granted roles that exceed the necessities of its operation, it creates an attack surface for privilege escalation within the cluster. This is especially critical if the roles include permissions for creating, updating, or accessing sensitive resources or secrets. An attacker exploiting such a workload can leverage these excessive privileges to perform unauthorized actions, potentially leading to a full cluster takeover. Ensuring that each service account associated with a workload is limited to permissions that are strictly necessary for its function is crucial in mitigating the risk of cluster takeovers.", "test": "Check if the service account used by a workload has cluster takeover roles.", "controlID": "C-0267", - "baseScore": 3.0, + "baseScore": 6.0, "category": { "name" : "Workload" }, From 55741b4cab515ab84b51c5e092befe9e66b3e53d Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 29 Feb 2024 16:21:46 +0200 Subject: [PATCH 120/195] check explicit rbac kinds Signed-off-by: YiscahLevySilas1 --- rules/workload-with-cluster-takeover-roles/raw.rego | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/workload-with-cluster-takeover-roles/raw.rego b/rules/workload-with-cluster-takeover-roles/raw.rego index 4111702cc..21b39d97a 100644 --- a/rules/workload-with-cluster-takeover-roles/raw.rego +++ b/rules/workload-with-cluster-takeover-roles/raw.rego @@ -17,11 +17,11 @@ deny[msga] { # check if sa has cluster takeover roles role := input[_] - endswith(role.kind, "Role") + role.kind in ["Role", "ClusterRole"] is_takeover_role(role) rolebinding := input[_] - endswith(rolebinding.kind, "RoleBinding") + rolebinding.kind in ["RoleBinding", "ClusterRoleBinding"] rolebinding.roleRef.name == role.metadata.name rolebinding.subjects[j].kind == "ServiceAccount" rolebinding.subjects[j].name == sa.metadata.name From d4a1f9f5a4ff18f8b31239670465f0aaa26eae76 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 <80635572+YiscahLevySilas1@users.noreply.github.com> Date: Sun, 3 Mar 2024 12:14:00 +0200 Subject: [PATCH 121/195] split cpu and memory controls (#594) * split cpu and memory controls Signed-off-by: YiscahLevySilas1 * split cpu and memory controls Signed-off-by: YiscahLevySilas1 --------- Signed-off-by: YiscahLevySilas1 --- controls/C-0009-resourcelimits.json | 8 --- controls/C-0268-ensurecpurequestsareset.json | 28 ++++++++ .../C-0269-ensurememoryrequestsareset.json | 28 ++++++++ controls/C-0270-ensurecpulimitsareset.json | 37 ++++++++++ controls/C-0271-ensurememorylimitsareset.json | 37 ++++++++++ exceptions/kube-apiserver.json | 7 +- frameworks/allcontrols.json | 30 ++++---- frameworks/armobest.json | 24 ++++--- frameworks/devopsbest.json | 36 ++++++---- frameworks/nsaframework.json | 18 
+++-- frameworks/security.json | 18 +++-- frameworks/workloadscan.json | 24 +++---- rules/resources-cpu-limits/raw.rego | 72 +++++++++++++++++++ rules/resources-cpu-limits/rule.metadata.json | 48 +++++++++++++ .../test/cronjob/expected.json | 27 +++++++ .../test/cronjob/input/cronjob.yaml | 19 +++++ .../test/pod-only-limits/expected.json | 23 ++++++ .../test/pod-only-limits/input/pod.yaml | 23 ++++++ .../test/pod/expected.json | 27 +++++++ .../test/pod/input/pod.yaml | 22 ++++++ .../test/workload/expected.json | 30 ++++++++ .../test/workload/input/deployment.yaml | 31 ++++++++ rules/resources-cpu-requests/raw.rego | 69 ++++++++++++++++++ .../resources-cpu-requests/rule.metadata.json | 48 +++++++++++++ .../test/cronjob/expected.json | 27 +++++++ .../test/cronjob/input/cronjob.yaml | 19 +++++ .../test/pod-only-requests/expected.json | 22 ++++++ .../test/pod-only-requests/input/pod.yaml | 23 ++++++ .../test/pod/expected.json | 27 +++++++ .../test/pod/input/pod.yaml | 22 ++++++ .../test/workload/expected.json | 30 ++++++++ .../test/workload/input/deployment.yaml | 31 ++++++++ rules/resources-memory-limits/raw.rego | 57 +++++++++++++++ .../rule.metadata.json | 48 +++++++++++++ .../test/cronjob/expected.json | 26 +++++++ .../test/cronjob/input/cronjob.yaml | 19 +++++ .../test/pod-only-limits/expected.json | 20 ++++++ .../test/pod-only-limits/input/pod.yaml | 23 ++++++ .../test/pod/expected.json | 26 +++++++ .../test/pod/input/pod.yaml | 22 ++++++ .../test/pod_pass/data.json | 6 ++ .../test/pod_pass/expected.json | 1 + .../test/pod_pass/input/pod.yaml | 15 ++++ .../test/workload/expected.json | 29 ++++++++ .../test/workload/input/deployment.yaml | 31 ++++++++ .../test/workload_passed/deployment1.yaml | 61 ++++++++++++++++ .../test/workload_passed/expected.json | 1 + rules/resources-memory-requests/raw.rego | 58 +++++++++++++++ .../rule.metadata.json | 48 +++++++++++++ .../test/cronjob/expected.json | 26 +++++++ .../test/cronjob/input/cronjob.yaml | 19 +++++ .../test/pod-only-requests/expected.json | 20 ++++++ .../test/pod-only-requests/input/pod.yaml | 23 ++++++ .../test/pod/expected.json | 26 +++++++ .../test/pod/input/pod.yaml | 22 ++++++ .../test/pod_pass/data.json | 6 ++ .../test/pod_pass/expected.json | 1 + .../test/pod_pass/input/pod.yaml | 15 ++++ .../test/workload/expected.json | 29 ++++++++ .../test/workload/input/deployment.yaml | 31 ++++++++ .../test/workload_passed/deployment1.yaml | 61 ++++++++++++++++ .../test/workload_passed/expected.json | 1 + 62 files changed, 1631 insertions(+), 75 deletions(-) create mode 100644 controls/C-0268-ensurecpurequestsareset.json create mode 100644 controls/C-0269-ensurememoryrequestsareset.json create mode 100644 controls/C-0270-ensurecpulimitsareset.json create mode 100644 controls/C-0271-ensurememorylimitsareset.json create mode 100644 rules/resources-cpu-limits/raw.rego create mode 100644 rules/resources-cpu-limits/rule.metadata.json create mode 100644 rules/resources-cpu-limits/test/cronjob/expected.json create mode 100644 rules/resources-cpu-limits/test/cronjob/input/cronjob.yaml create mode 100644 rules/resources-cpu-limits/test/pod-only-limits/expected.json create mode 100644 rules/resources-cpu-limits/test/pod-only-limits/input/pod.yaml create mode 100644 rules/resources-cpu-limits/test/pod/expected.json create mode 100644 rules/resources-cpu-limits/test/pod/input/pod.yaml create mode 100644 rules/resources-cpu-limits/test/workload/expected.json create mode 100644 rules/resources-cpu-limits/test/workload/input/deployment.yaml create mode 
100644 rules/resources-cpu-requests/raw.rego create mode 100644 rules/resources-cpu-requests/rule.metadata.json create mode 100644 rules/resources-cpu-requests/test/cronjob/expected.json create mode 100644 rules/resources-cpu-requests/test/cronjob/input/cronjob.yaml create mode 100644 rules/resources-cpu-requests/test/pod-only-requests/expected.json create mode 100644 rules/resources-cpu-requests/test/pod-only-requests/input/pod.yaml create mode 100644 rules/resources-cpu-requests/test/pod/expected.json create mode 100644 rules/resources-cpu-requests/test/pod/input/pod.yaml create mode 100644 rules/resources-cpu-requests/test/workload/expected.json create mode 100644 rules/resources-cpu-requests/test/workload/input/deployment.yaml create mode 100644 rules/resources-memory-limits/raw.rego create mode 100644 rules/resources-memory-limits/rule.metadata.json create mode 100644 rules/resources-memory-limits/test/cronjob/expected.json create mode 100644 rules/resources-memory-limits/test/cronjob/input/cronjob.yaml create mode 100644 rules/resources-memory-limits/test/pod-only-limits/expected.json create mode 100644 rules/resources-memory-limits/test/pod-only-limits/input/pod.yaml create mode 100644 rules/resources-memory-limits/test/pod/expected.json create mode 100644 rules/resources-memory-limits/test/pod/input/pod.yaml create mode 100644 rules/resources-memory-limits/test/pod_pass/data.json create mode 100644 rules/resources-memory-limits/test/pod_pass/expected.json create mode 100644 rules/resources-memory-limits/test/pod_pass/input/pod.yaml create mode 100644 rules/resources-memory-limits/test/workload/expected.json create mode 100644 rules/resources-memory-limits/test/workload/input/deployment.yaml create mode 100644 rules/resources-memory-limits/test/workload_passed/deployment1.yaml create mode 100644 rules/resources-memory-limits/test/workload_passed/expected.json create mode 100644 rules/resources-memory-requests/raw.rego create mode 100644 rules/resources-memory-requests/rule.metadata.json create mode 100644 rules/resources-memory-requests/test/cronjob/expected.json create mode 100644 rules/resources-memory-requests/test/cronjob/input/cronjob.yaml create mode 100644 rules/resources-memory-requests/test/pod-only-requests/expected.json create mode 100644 rules/resources-memory-requests/test/pod-only-requests/input/pod.yaml create mode 100644 rules/resources-memory-requests/test/pod/expected.json create mode 100644 rules/resources-memory-requests/test/pod/input/pod.yaml create mode 100644 rules/resources-memory-requests/test/pod_pass/data.json create mode 100644 rules/resources-memory-requests/test/pod_pass/expected.json create mode 100644 rules/resources-memory-requests/test/pod_pass/input/pod.yaml create mode 100644 rules/resources-memory-requests/test/workload/expected.json create mode 100644 rules/resources-memory-requests/test/workload/input/deployment.yaml create mode 100644 rules/resources-memory-requests/test/workload_passed/deployment1.yaml create mode 100644 rules/resources-memory-requests/test/workload_passed/expected.json diff --git a/controls/C-0009-resourcelimits.json b/controls/C-0009-resourcelimits.json index 7ef821794..16f4c79dd 100644 --- a/controls/C-0009-resourcelimits.json +++ b/controls/C-0009-resourcelimits.json @@ -3,14 +3,6 @@ "attributes": { "controlTypeTags": [ "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } ] }, "description": "CPU and memory resources should have a limit set for 
every container or a namespace to prevent resource exhaustion. This control identifies all the pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", diff --git a/controls/C-0268-ensurecpurequestsareset.json b/controls/C-0268-ensurecpurequestsareset.json new file mode 100644 index 000000000..a5e309df0 --- /dev/null +++ b/controls/C-0268-ensurecpurequestsareset.json @@ -0,0 +1,28 @@ +{ + "name": "Ensure CPU requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU requests are not set.", + "remediation": "Set the CPU requests or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-cpu-requests" + ], + "controlID": "C-0268", + "baseScore": 3.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management" + } + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} \ No newline at end of file diff --git a/controls/C-0269-ensurememoryrequestsareset.json b/controls/C-0269-ensurememoryrequestsareset.json new file mode 100644 index 000000000..290db0b59 --- /dev/null +++ b/controls/C-0269-ensurememoryrequestsareset.json @@ -0,0 +1,28 @@ +{ + "name": "Ensure memory requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the memory requests are not set.", + "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-memory-requests" + ], + "controlID": "C-0269", + "baseScore": 3.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management" + } + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} \ No newline at end of file diff --git a/controls/C-0270-ensurecpulimitsareset.json b/controls/C-0270-ensurecpulimitsareset.json new file mode 100644 index 000000000..edc09d4c1 --- /dev/null +++ b/controls/C-0270-ensurecpulimitsareset.json @@ -0,0 +1,37 @@ +{ + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-cpu-limits" + ], + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management" + } + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} \ No newline at end of file diff --git a/controls/C-0271-ensurememorylimitsareset.json b/controls/C-0271-ensurememorylimitsareset.json new file mode 100644 index 000000000..ce56063d2 --- /dev/null +++ b/controls/C-0271-ensurememorylimitsareset.json @@ -0,0 +1,37 @@ +{ + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control 
identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-memory-limits" + ], + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management" + } + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} \ No newline at end of file diff --git a/exceptions/kube-apiserver.json b/exceptions/kube-apiserver.json index 44bb4c6d3..df6985fd1 100644 --- a/exceptions/kube-apiserver.json +++ b/exceptions/kube-apiserver.json @@ -44,13 +44,10 @@ "controlID": "c-0016" }, { - "controlID": "c-0004" + "controlID": "C-0270" }, { - "controlID": "c-0050" - }, - { - "controlID": "c-0009" + "controlID": "C-0271" }, { "controlID": "c-0048" diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index bd1a5bf95..173558c36 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -18,12 +18,6 @@ "name": "Prevent containers from allowing command execution" } }, - { - "controlID": "C-0004", - "patch": { - "name": "Resources memory limit and request" - } - }, { "controlID": "C-0005", "patch": { @@ -36,12 +30,6 @@ "name": "Roles with delete capabilities" } }, - { - "controlID": "C-0009", - "patch": { - "name": "Resource limits" - } - }, { "controlID": "C-0012", "patch": { @@ -186,12 +174,6 @@ "name": "Network mapping" } }, - { - "controlID": "C-0050", - "patch": { - "name": "Resources CPU limit and request" - } - }, { "controlID": "C-0052", "patch": { @@ -377,6 +359,18 @@ "patch": { "name": "Authenticated user has sensitive permissions" } + }, + { + "controlID": "C-0270", + "patch": { + "name": "Ensure CPU limits are set" + } + }, + { + "controlID": "C-0271", + "patch": { + "name": "Ensure memory limits are set" + } } ] } diff --git a/frameworks/armobest.json b/frameworks/armobest.json index 044d9dea3..771d04bf1 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -10,7 +10,9 @@ "file" ] }, - "typeTags": ["compliance"], + "typeTags": [ + "compliance" + ], "activeControls": [ { "controlID": "C-0002", @@ -24,12 +26,6 @@ "name": "API server insecure port is enabled" } }, - { - "controlID": "C-0009", - "patch": { - "name": "Resource limits" - } - }, { "controlID": "C-0012", "patch": { @@ -233,6 +229,18 @@ "patch": { "name": "Check if signature exists" } + }, + { + "controlID": "C-0270", + "patch": { + "name": "Ensure CPU limits are set" + } + }, + { + "controlID": "C-0271", + "patch": { + "name": "Ensure memory limits are set" + } } ] -} +} \ No newline at end of file diff --git a/frameworks/devopsbest.json b/frameworks/devopsbest.json index daaa2212e..d01a4b347 100644 --- a/frameworks/devopsbest.json +++ b/frameworks/devopsbest.json @@ -12,12 +12,6 @@ }, "typeTags": ["compliance"], "activeControls": [ - { - "controlID": "C-0004", - "patch": { - "name": "Resources memory limit and request" - } - }, { "controlID": "C-0018", "patch": { @@ -30,12 +24,6 @@ "name": "Container hostPort" } }, - { - "controlID": "C-0050", - "patch": { - "name": "Resources CPU limit and request" - } - }, { "controlID": "C-0056", "patch": { @@ -83,6 +71,30 @@ "patch": { "name": "Deprecated Kubernetes image registry" } + }, + { + "controlID": "C-0268", + "patch": { + "name": "Ensure CPU requests are set" + } + }, + { + "controlID": "C-0269", + "patch": { + "name": "Ensure memory requests are set" + } + }, + { + "controlID": "C-0270", + "patch": { + "name": "Ensure CPU 
limits are set" + } + }, + { + "controlID": "C-0271", + "patch": { + "name": "Ensure memory limits are set" + } } ] } diff --git a/frameworks/nsaframework.json b/frameworks/nsaframework.json index 7b2a33609..71c731fd0 100644 --- a/frameworks/nsaframework.json +++ b/frameworks/nsaframework.json @@ -24,12 +24,6 @@ "name": "API server insecure port is enabled" } }, - { - "controlID": "C-0009", - "patch": { - "name": "Resource limits" - } - }, { "controlID": "C-0012", "patch": { @@ -155,6 +149,18 @@ "patch": { "name": "Enforce Kubelet client TLS authentication" } + }, + { + "controlID": "C-0270", + "patch": { + "name": "Ensure CPU limits are set" + } + }, + { + "controlID": "C-0271", + "patch": { + "name": "Ensure memory limits are set" + } } ] } diff --git a/frameworks/security.json b/frameworks/security.json index e5d2415c4..2840c9c17 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -20,12 +20,6 @@ "name": "API server insecure port is enabled" } }, - { - "controlID": "C-0009", - "patch": { - "name": "Resource limits" - } - }, { "controlID": "C-0012", "patch": { @@ -187,6 +181,18 @@ "patch": { "name": "Authenticated user has sensitive permissions" } + }, + { + "controlID": "C-0270", + "patch": { + "name": "Ensure CPU limits are set" + } + }, + { + "controlID": "C-0271", + "patch": { + "name": "Ensure memory limits are set" + } } ] } \ No newline at end of file diff --git a/frameworks/workloadscan.json b/frameworks/workloadscan.json index be2afbeab..f1f8a868c 100644 --- a/frameworks/workloadscan.json +++ b/frameworks/workloadscan.json @@ -26,18 +26,6 @@ "name": "Check if signature exists" } }, - { - "controlID": "C-0004", - "patch": { - "name": "Resources memory limit and request" - } - }, - { - "controlID": "C-0050", - "patch": { - "name": "Resources CPU limit and request" - } - }, { "controlID": "C-0045", "patch": { @@ -134,6 +122,18 @@ "patch": { "name": "Privileged container" } + }, + { + "controlID": "C-0270", + "patch": { + "name": "Ensure CPU limits are set" + } + }, + { + "controlID": "C-0271", + "patch": { + "name": "Ensure memory limits are set" + } } ] } diff --git a/rules/resources-cpu-limits/raw.rego b/rules/resources-cpu-limits/raw.rego new file mode 100644 index 000000000..0b0356470 --- /dev/null +++ b/rules/resources-cpu-limits/raw.rego @@ -0,0 +1,72 @@ +package armo_builtins + + +# ==================================== no CPU limits ============================================= +# Fails if pod does not have container with CPU-limits +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v does not have CPU-limit or request", [ container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [], + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [pod] + } + } +} + +# Fails if workload does not have container with CPU-limits +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: 
%v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [], + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [wl] + } + } +} + +# Fails if cronjob does not have container with CPU-limits +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + not container.resources.limits.cpu + + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [], + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [wl] + } + } +} + + diff --git a/rules/resources-cpu-limits/rule.metadata.json b/rules/resources-cpu-limits/rule.metadata.json new file mode 100644 index 000000000..f3cda4488 --- /dev/null +++ b/rules/resources-cpu-limits/rule.metadata.json @@ -0,0 +1,48 @@ +{ + "name": "resources-cpu-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins" +} \ No newline at end of file diff --git a/rules/resources-cpu-limits/test/cronjob/expected.json b/rules/resources-cpu-limits/test/cronjob/expected.json new file mode 100644 index 000000000..bc2c0e2d5 --- /dev/null +++ b/rules/resources-cpu-limits/test/cronjob/expected.json @@ -0,0 +1,27 @@ +[ + { + "alertMessage": "Container: hello in CronJob: hello does not have CPU-limit or request", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.limits.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-limits/test/cronjob/input/cronjob.yaml b/rules/resources-cpu-limits/test/cronjob/input/cronjob.yaml new file mode 100644 index 000000000..ea5e131c7 --- /dev/null +++ b/rules/resources-cpu-limits/test/cronjob/input/cronjob.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: hello + image: busybox:latest + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster diff --git a/rules/resources-cpu-limits/test/pod-only-limits/expected.json b/rules/resources-cpu-limits/test/pod-only-limits/expected.json new file mode 100644 index 000000000..0774d1458 --- /dev/null +++ b/rules/resources-cpu-limits/test/pod-only-limits/expected.json @@ -0,0 +1,23 @@ +[ + { + 
"alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "reviewPaths": [], + "failedPaths": [], + "fixPaths" : [{"path":"spec.containers[1].resources.limits.cpu", "value": "YOUR_VALUE"}], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] + diff --git a/rules/resources-cpu-limits/test/pod-only-limits/input/pod.yaml b/rules/resources-cpu-limits/test/pod-only-limits/input/pod.yaml new file mode 100644 index 000000000..d1207f1bb --- /dev/null +++ b/rules/resources-cpu-limits/test/pod-only-limits/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" diff --git a/rules/resources-cpu-limits/test/pod/expected.json b/rules/resources-cpu-limits/test/pod/expected.json new file mode 100644 index 000000000..aaedc1fbf --- /dev/null +++ b/rules/resources-cpu-limits/test/pod/expected.json @@ -0,0 +1,27 @@ +[ + { + "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.limits.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-limits/test/pod/input/pod.yaml b/rules/resources-cpu-limits/test/pod/input/pod.yaml new file mode 100644 index 000000000..19a64f850 --- /dev/null +++ b/rules/resources-cpu-limits/test/pod/input/pod.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + limits: + memory: "128Mi" diff --git a/rules/resources-cpu-limits/test/workload/expected.json b/rules/resources-cpu-limits/test/workload/expected.json new file mode 100644 index 000000000..a139b8d79 --- /dev/null +++ b/rules/resources-cpu-limits/test/workload/expected.json @@ -0,0 +1,30 @@ +[ + { + "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.limits.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-limits/test/workload/input/deployment.yaml b/rules/resources-cpu-limits/test/workload/input/deployment.yaml new file mode 100644 index 000000000..28b3afed1 --- /dev/null +++ 
b/rules/resources-cpu-limits/test/workload/input/deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test + namespace: default + labels: + purpose: demonstrate-command +spec: + selector: + matchLabels: + purpose: demonstrate-command + template: + metadata: + labels: + purpose: demonstrate-command + spec : + containers : + - + name : app + image : images.my-company.example/app:v4 + - + name : log-aggregator + image : images.my-company.example/log-aggregator:v6 + resources : + requests : + memory : "64Mi" + cpu : "250m" + limits : + memory : "128Mi" + cpu : "500m" + \ No newline at end of file diff --git a/rules/resources-cpu-requests/raw.rego b/rules/resources-cpu-requests/raw.rego new file mode 100644 index 000000000..3a0c3d0c2 --- /dev/null +++ b/rules/resources-cpu-requests/raw.rego @@ -0,0 +1,69 @@ +package armo_builtins + +# ==================================== no CPU requests ============================================= +# Fails if pod does not have container with CPU request +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + not container.resources.requests.cpu + + fixPaths := [{"path": sprintf("spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v does not have CPU-limit or request", [ container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [], + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [pod] + } + } +} + +# Fails if workload does not have container with CPU requests +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + not container.resources.requests.cpu + + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [], + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [wl] + } + } +} + +# Fails if cronjob does not have container with CPU requests +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + not container.resources.requests.cpu + + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have CPU-limit or request", [ container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "reviewPaths": [], + "failedPaths": [], + "fixPaths": fixPaths, + "alertObject": { + "k8sApiObjects": [wl] + } + } +} diff --git a/rules/resources-cpu-requests/rule.metadata.json b/rules/resources-cpu-requests/rule.metadata.json new file mode 100644 index 000000000..6ca821706 --- /dev/null +++ b/rules/resources-cpu-requests/rule.metadata.json @@ -0,0 +1,48 @@ +{ + "name": "resources-cpu-requests", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + 
"v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU requests are not set.", + "remediation": "Ensure CPU requests are set.", + "ruleQuery": "armo_builtins" +} \ No newline at end of file diff --git a/rules/resources-cpu-requests/test/cronjob/expected.json b/rules/resources-cpu-requests/test/cronjob/expected.json new file mode 100644 index 000000000..344180dd5 --- /dev/null +++ b/rules/resources-cpu-requests/test/cronjob/expected.json @@ -0,0 +1,27 @@ +[ + { + "alertMessage": "Container: hello in CronJob: hello does not have CPU-limit or request", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-requests/test/cronjob/input/cronjob.yaml b/rules/resources-cpu-requests/test/cronjob/input/cronjob.yaml new file mode 100644 index 000000000..ea5e131c7 --- /dev/null +++ b/rules/resources-cpu-requests/test/cronjob/input/cronjob.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: hello + image: busybox:latest + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster diff --git a/rules/resources-cpu-requests/test/pod-only-requests/expected.json b/rules/resources-cpu-requests/test/pod-only-requests/expected.json new file mode 100644 index 000000000..83beae079 --- /dev/null +++ b/rules/resources-cpu-requests/test/pod-only-requests/expected.json @@ -0,0 +1,22 @@ +[ + { + "alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "reviewPaths": [], + "failedPaths": [], + "fixPaths" : [{"path": "spec.containers[1].resources.requests.cpu", "value": "YOUR_VALUE"}], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-requests/test/pod-only-requests/input/pod.yaml b/rules/resources-cpu-requests/test/pod-only-requests/input/pod.yaml new file mode 100644 index 000000000..0495de5d3 --- /dev/null +++ b/rules/resources-cpu-requests/test/pod-only-requests/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + limits: + memory: "128Mi" + cpu: "500m" diff --git a/rules/resources-cpu-requests/test/pod/expected.json b/rules/resources-cpu-requests/test/pod/expected.json new file mode 100644 index 000000000..f17c98b0a --- /dev/null +++ b/rules/resources-cpu-requests/test/pod/expected.json @@ -0,0 +1,27 @@ +[ + { + 
"alertMessage": "Container: log-aggregator does not have CPU-limit or request", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-requests/test/pod/input/pod.yaml b/rules/resources-cpu-requests/test/pod/input/pod.yaml new file mode 100644 index 000000000..19a64f850 --- /dev/null +++ b/rules/resources-cpu-requests/test/pod/input/pod.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + limits: + memory: "128Mi" diff --git a/rules/resources-cpu-requests/test/workload/expected.json b/rules/resources-cpu-requests/test/workload/expected.json new file mode 100644 index 000000000..c6e3a66c6 --- /dev/null +++ b/rules/resources-cpu-requests/test/workload/expected.json @@ -0,0 +1,30 @@ +[ + { + "alertMessage": "Container: app in Deployment: test does not have CPU-limit or request", + "reviewPaths": [], + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.requests.cpu", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-cpu-requests/test/workload/input/deployment.yaml b/rules/resources-cpu-requests/test/workload/input/deployment.yaml new file mode 100644 index 000000000..28b3afed1 --- /dev/null +++ b/rules/resources-cpu-requests/test/workload/input/deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test + namespace: default + labels: + purpose: demonstrate-command +spec: + selector: + matchLabels: + purpose: demonstrate-command + template: + metadata: + labels: + purpose: demonstrate-command + spec : + containers : + - + name : app + image : images.my-company.example/app:v4 + - + name : log-aggregator + image : images.my-company.example/log-aggregator:v6 + resources : + requests : + memory : "64Mi" + cpu : "250m" + limits : + memory : "128Mi" + cpu : "500m" + \ No newline at end of file diff --git a/rules/resources-memory-limits/raw.rego b/rules/resources-memory-limits/raw.rego new file mode 100644 index 000000000..1c307d57d --- /dev/null +++ b/rules/resources-memory-limits/raw.rego @@ -0,0 +1,57 @@ +package armo_builtins + +# ================================== no memory limits ================================== +# Fails if pod does not have container with memory-limits +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v does not have memory-limit or request", 
[container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [pod]}, + } +} + +# Fails if workload does not have container with memory-limits +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +# Fails if cronjob does not have container with memory-limits +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + not container.resources.limits.memory + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} diff --git a/rules/resources-memory-limits/rule.metadata.json b/rules/resources-memory-limits/rule.metadata.json new file mode 100644 index 000000000..17d2d44e8 --- /dev/null +++ b/rules/resources-memory-limits/rule.metadata.json @@ -0,0 +1,48 @@ +{ + "name": "resources-memory-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins" +} \ No newline at end of file diff --git a/rules/resources-memory-limits/test/cronjob/expected.json b/rules/resources-memory-limits/test/cronjob/expected.json new file mode 100644 index 000000000..151841bc4 --- /dev/null +++ b/rules/resources-memory-limits/test/cronjob/expected.json @@ -0,0 +1,26 @@ +[ + { + "alertMessage": "Container: hello in CronJob: hello does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.limits.memory", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-memory-limits/test/cronjob/input/cronjob.yaml b/rules/resources-memory-limits/test/cronjob/input/cronjob.yaml new file mode 100644 index 000000000..ea5e131c7 --- /dev/null +++ 
b/rules/resources-memory-limits/test/cronjob/input/cronjob.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: hello + image: busybox:latest + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster diff --git a/rules/resources-memory-limits/test/pod-only-limits/expected.json b/rules/resources-memory-limits/test/pod-only-limits/expected.json new file mode 100644 index 000000000..6b6a0addf --- /dev/null +++ b/rules/resources-memory-limits/test/pod-only-limits/expected.json @@ -0,0 +1,20 @@ +[{ + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [{ + "path": "spec.containers[1].resources.limits.memory", + "value": "YOUR_VALUE" + }], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + }] + } +}] \ No newline at end of file diff --git a/rules/resources-memory-limits/test/pod-only-limits/input/pod.yaml b/rules/resources-memory-limits/test/pod-only-limits/input/pod.yaml new file mode 100644 index 000000000..7774dea5f --- /dev/null +++ b/rules/resources-memory-limits/test/pod-only-limits/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + cpu: "500m" diff --git a/rules/resources-memory-limits/test/pod/expected.json b/rules/resources-memory-limits/test/pod/expected.json new file mode 100644 index 000000000..c038718d6 --- /dev/null +++ b/rules/resources-memory-limits/test/pod/expected.json @@ -0,0 +1,26 @@ +[ + { + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.limits.memory", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-memory-limits/test/pod/input/pod.yaml b/rules/resources-memory-limits/test/pod/input/pod.yaml new file mode 100644 index 000000000..c3e7d26a1 --- /dev/null +++ b/rules/resources-memory-limits/test/pod/input/pod.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + cpu: "250m" + limits: + cpu: "500m" diff --git a/rules/resources-memory-limits/test/pod_pass/data.json b/rules/resources-memory-limits/test/pod_pass/data.json new file mode 100644 index 000000000..7fc81fd94 --- /dev/null +++ b/rules/resources-memory-limits/test/pod_pass/data.json @@ -0,0 +1,6 @@ +{ + "postureControlInputs": { + "memory_limit_max": ["256Mi"], + "memory_request_max": 
["128Mi"] + } +} \ No newline at end of file diff --git a/rules/resources-memory-limits/test/pod_pass/expected.json b/rules/resources-memory-limits/test/pod_pass/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/resources-memory-limits/test/pod_pass/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/resources-memory-limits/test/pod_pass/input/pod.yaml b/rules/resources-memory-limits/test/pod_pass/input/pod.yaml new file mode 100644 index 000000000..e84566463 --- /dev/null +++ b/rules/resources-memory-limits/test/pod_pass/input/pod.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" \ No newline at end of file diff --git a/rules/resources-memory-limits/test/workload/expected.json b/rules/resources-memory-limits/test/workload/expected.json new file mode 100644 index 000000000..2a1b71846 --- /dev/null +++ b/rules/resources-memory-limits/test/workload/expected.json @@ -0,0 +1,29 @@ +[ + { + "alertMessage": "Container: app in Deployment: test does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.limits.memory", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-memory-limits/test/workload/input/deployment.yaml b/rules/resources-memory-limits/test/workload/input/deployment.yaml new file mode 100644 index 000000000..28b3afed1 --- /dev/null +++ b/rules/resources-memory-limits/test/workload/input/deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test + namespace: default + labels: + purpose: demonstrate-command +spec: + selector: + matchLabels: + purpose: demonstrate-command + template: + metadata: + labels: + purpose: demonstrate-command + spec : + containers : + - + name : app + image : images.my-company.example/app:v4 + - + name : log-aggregator + image : images.my-company.example/log-aggregator:v6 + resources : + requests : + memory : "64Mi" + cpu : "250m" + limits : + memory : "128Mi" + cpu : "500m" + \ No newline at end of file diff --git a/rules/resources-memory-limits/test/workload_passed/deployment1.yaml b/rules/resources-memory-limits/test/workload_passed/deployment1.yaml new file mode 100644 index 000000000..a1adda28e --- /dev/null +++ b/rules/resources-memory-limits/test/workload_passed/deployment1.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + generation: 19 + labels: + app: dtr-customer-myapp + name: dtr-customer-myapp + namespace: dtr-customer +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: dtr-customer-myapp + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: dtr-customer-myapp + spec: + containers: + - envFrom: + - configMapRef: + name: dtr-customer-myapp-configmap + - secretRef: + name: dtr-customer-myapp-secrets + image: myrepo.domain.com/cre/dtr-customer-myapp:1.1.1 + imagePullPolicy: IfNotPresent + 
name: dtr-customer-myapp + ports: + - containerPort: 343 + protocol: TCP + resources: + limits: + cpu: 450m + memory: "512Mi" + requests: + cpu: 100m + memory: "200Mi" + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + imagePullSecrets: + - name: myimagesecret + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: dtr-customer-myapp + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway \ No newline at end of file diff --git a/rules/resources-memory-limits/test/workload_passed/expected.json b/rules/resources-memory-limits/test/workload_passed/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/resources-memory-limits/test/workload_passed/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/resources-memory-requests/raw.rego b/rules/resources-memory-requests/raw.rego new file mode 100644 index 000000000..2caf8b372 --- /dev/null +++ b/rules/resources-memory-requests/raw.rego @@ -0,0 +1,58 @@ +package armo_builtins + +# ================================== no memory requests ================================== +# Fails if pod does not have container with memory requests +deny[msga] { + pod := input[_] + pod.kind == "Pod" + container := pod.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v does not have memory-limit or request", [container.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [pod]}, + } +} + +# Fails if workload does not have container with memory requests +deny[msga] { + wl := input[_] + spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"} + spec_template_spec_patterns[wl.kind] + container := wl.spec.template.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + +# Fails if cronjob does not have container with memory requests +deny[msga] { + wl := input[_] + wl.kind == "CronJob" + container = wl.spec.jobTemplate.spec.template.spec.containers[i] + not container.resources.requests.memory + fixPaths := [{"path": sprintf("spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory", [format_int(i, 10)]), "value": "YOUR_VALUE"}] + + msga := { + "alertMessage": sprintf("Container: %v in %v: %v does not have memory-limit or request", [container.name, wl.kind, wl.metadata.name]), + "packagename": "armo_builtins", + "alertScore": 7, + "fixPaths": fixPaths, + "failedPaths": [], + "alertObject": {"k8sApiObjects": [wl]}, + } +} + diff --git a/rules/resources-memory-requests/rule.metadata.json b/rules/resources-memory-requests/rule.metadata.json new file mode 100644 index 000000000..aef86df41 --- /dev/null +++ 
b/rules/resources-memory-requests/rule.metadata.json @@ -0,0 +1,48 @@ +{ + "name": "resources-memory-requests", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory requests are not set.", + "remediation": "Ensure memory requests are set.", + "ruleQuery": "armo_builtins" +} \ No newline at end of file diff --git a/rules/resources-memory-requests/test/cronjob/expected.json b/rules/resources-memory-requests/test/cronjob/expected.json new file mode 100644 index 000000000..7ae170d0b --- /dev/null +++ b/rules/resources-memory-requests/test/cronjob/expected.json @@ -0,0 +1,26 @@ +[ + { + "alertMessage": "Container: hello in CronJob: hello does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].resources.requests.memory", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-memory-requests/test/cronjob/input/cronjob.yaml b/rules/resources-memory-requests/test/cronjob/input/cronjob.yaml new file mode 100644 index 000000000..ea5e131c7 --- /dev/null +++ b/rules/resources-memory-requests/test/cronjob/input/cronjob.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: hello + image: busybox:latest + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster diff --git a/rules/resources-memory-requests/test/pod-only-requests/expected.json b/rules/resources-memory-requests/test/pod-only-requests/expected.json new file mode 100644 index 000000000..4648d72fc --- /dev/null +++ b/rules/resources-memory-requests/test/pod-only-requests/expected.json @@ -0,0 +1,20 @@ +[{ + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [{ + "path": "spec.containers[1].resources.requests.memory", + "value": "YOUR_VALUE" + }], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + }] + } +}] \ No newline at end of file diff --git a/rules/resources-memory-requests/test/pod-only-requests/input/pod.yaml b/rules/resources-memory-requests/test/pod-only-requests/input/pod.yaml new file mode 100644 index 000000000..d146d134a --- /dev/null +++ b/rules/resources-memory-requests/test/pod-only-requests/input/pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + 
cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" diff --git a/rules/resources-memory-requests/test/pod/expected.json b/rules/resources-memory-requests/test/pod/expected.json new file mode 100644 index 000000000..d87f0e699 --- /dev/null +++ b/rules/resources-memory-requests/test/pod/expected.json @@ -0,0 +1,26 @@ +[ + { + "alertMessage": "Container: log-aggregator does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.containers[1].resources.requests.memory", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "frontend" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/resources-memory-requests/test/pod/input/pod.yaml b/rules/resources-memory-requests/test/pod/input/pod.yaml new file mode 100644 index 000000000..c3e7d26a1 --- /dev/null +++ b/rules/resources-memory-requests/test/pod/input/pod.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + cpu: "250m" + limits: + cpu: "500m" diff --git a/rules/resources-memory-requests/test/pod_pass/data.json b/rules/resources-memory-requests/test/pod_pass/data.json new file mode 100644 index 000000000..7fc81fd94 --- /dev/null +++ b/rules/resources-memory-requests/test/pod_pass/data.json @@ -0,0 +1,6 @@ +{ + "postureControlInputs": { + "memory_limit_max": ["256Mi"], + "memory_request_max": ["128Mi"] + } +} \ No newline at end of file diff --git a/rules/resources-memory-requests/test/pod_pass/expected.json b/rules/resources-memory-requests/test/pod_pass/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/resources-memory-requests/test/pod_pass/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/resources-memory-requests/test/pod_pass/input/pod.yaml b/rules/resources-memory-requests/test/pod_pass/input/pod.yaml new file mode 100644 index 000000000..e84566463 --- /dev/null +++ b/rules/resources-memory-requests/test/pod_pass/input/pod.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" \ No newline at end of file diff --git a/rules/resources-memory-requests/test/workload/expected.json b/rules/resources-memory-requests/test/workload/expected.json new file mode 100644 index 000000000..e2f67c7eb --- /dev/null +++ b/rules/resources-memory-requests/test/workload/expected.json @@ -0,0 +1,29 @@ +[ + { + "alertMessage": "Container: app in Deployment: test does not have memory-limit or request", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].resources.requests.memory", + "value": "YOUR_VALUE" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "test" + } + } + ] + } + } +] \ No newline at end of file diff --git 
a/rules/resources-memory-requests/test/workload/input/deployment.yaml b/rules/resources-memory-requests/test/workload/input/deployment.yaml new file mode 100644 index 000000000..28b3afed1 --- /dev/null +++ b/rules/resources-memory-requests/test/workload/input/deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test + namespace: default + labels: + purpose: demonstrate-command +spec: + selector: + matchLabels: + purpose: demonstrate-command + template: + metadata: + labels: + purpose: demonstrate-command + spec : + containers : + - + name : app + image : images.my-company.example/app:v4 + - + name : log-aggregator + image : images.my-company.example/log-aggregator:v6 + resources : + requests : + memory : "64Mi" + cpu : "250m" + limits : + memory : "128Mi" + cpu : "500m" + \ No newline at end of file diff --git a/rules/resources-memory-requests/test/workload_passed/deployment1.yaml b/rules/resources-memory-requests/test/workload_passed/deployment1.yaml new file mode 100644 index 000000000..a1adda28e --- /dev/null +++ b/rules/resources-memory-requests/test/workload_passed/deployment1.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + generation: 19 + labels: + app: dtr-customer-myapp + name: dtr-customer-myapp + namespace: dtr-customer +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: dtr-customer-myapp + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: dtr-customer-myapp + spec: + containers: + - envFrom: + - configMapRef: + name: dtr-customer-myapp-configmap + - secretRef: + name: dtr-customer-myapp-secrets + image: myrepo.domain.com/cre/dtr-customer-myapp:1.1.1 + imagePullPolicy: IfNotPresent + name: dtr-customer-myapp + ports: + - containerPort: 343 + protocol: TCP + resources: + limits: + cpu: 450m + memory: "512Mi" + requests: + cpu: 100m + memory: "200Mi" + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + imagePullSecrets: + - name: myimagesecret + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: dtr-customer-myapp + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway \ No newline at end of file diff --git a/rules/resources-memory-requests/test/workload_passed/expected.json b/rules/resources-memory-requests/test/workload_passed/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/resources-memory-requests/test/workload_passed/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file From 68bffaf685a45d4799d33c0e6e557306c5355753 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 <80635572+YiscahLevySilas1@users.noreply.github.com> Date: Sun, 3 Mar 2024 14:12:56 +0200 Subject: [PATCH 122/195] add control C-0272 (#595) Signed-off-by: YiscahLevySilas1 --- ...-0272-workloadwithadministrativeroles.json | 22 +++ .../filter.rego | 32 +++++ .../raw.rego | 129 ++++++++++++++++++ .../rule.metadata.json | 63 +++++++++ .../test/fail-wl-creates-pod/expected.json | 110 +++++++++++++++ .../input/clusterrole.yaml | 8 ++ .../input/clusterrolebinding.yaml | 15 ++ .../test/fail-wl-creates-pod/input/file.yaml | 17 +++ .../test/fail-wl-creates-pod/input/sa.json | 17 +++ .../pass-wl-limited-permissions/expected.json | 1 + 
.../input/clusterrole.yaml | 11 ++ .../input/clusterrolebinding.yaml | 15 ++ .../input/file.yaml | 17 +++ .../pass-wl-limited-permissions/input/sa.json | 17 +++ .../pass-wl-not-mount-sa-token/expected.json | 1 + .../input/clusterrole.yaml | 8 ++ .../input/clusterrolebinding.yaml | 15 ++ .../input/file.yaml | 17 +++ .../pass-wl-not-mount-sa-token/input/sa.json | 17 +++ .../test/pass-wl-rolebinding/expected.json | 1 + .../pass-wl-rolebinding/input/cluterrole.yaml | 8 ++ .../test/pass-wl-rolebinding/input/file.yaml | 17 +++ .../input/rolebinding.yaml | 13 ++ .../test/pass-wl-rolebinding/input/sa.json | 17 +++ 24 files changed, 588 insertions(+) create mode 100644 controls/C-0272-workloadwithadministrativeroles.json create mode 100644 rules/workload-with-administrative-roles/filter.rego create mode 100644 rules/workload-with-administrative-roles/raw.rego create mode 100644 rules/workload-with-administrative-roles/rule.metadata.json create mode 100644 rules/workload-with-administrative-roles/test/fail-wl-creates-pod/expected.json create mode 100644 rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/clusterrole.yaml create mode 100644 rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/clusterrolebinding.yaml create mode 100644 rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/file.yaml create mode 100644 rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/sa.json create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/expected.json create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/clusterrole.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/clusterrolebinding.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/file.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/sa.json create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/expected.json create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/clusterrole.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/clusterrolebinding.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/file.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/sa.json create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-rolebinding/expected.json create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/cluterrole.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/file.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/rolebinding.yaml create mode 100644 rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/sa.json diff --git a/controls/C-0272-workloadwithadministrativeroles.json b/controls/C-0272-workloadwithadministrativeroles.json new file mode 100644 index 000000000..3d59e4f04 --- /dev/null +++ b/controls/C-0272-workloadwithadministrativeroles.json @@ -0,0 +1,22 @@ +{ + "name": "Workload with administrative roles", + "attributes": {}, + "description": "This control identifies workloads where the associated service accounts have roles that grant 
administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", + "rulesNames": [ + "workload-with-administrative-roles" + ], + "long_description": "In Kubernetes environments, workloads granted administrative-level privileges without restrictions represent a critical security vulnerability. When a service account associated with a workload is configured with permissions to perform any action on any resource, it essentially holds unrestricted access within the cluster, akin to cluster admin privileges. This configuration dramatically increases the risk of security breaches, including data theft, unauthorized modifications, and potentially full cluster takeovers. Such privileges allow attackers to exploit the workload for wide-ranging malicious activities, bypassing the principle of least privilege. Therefore, it's essential to follow the least privilege principle and make sure cluster admin permissions are granted only when it is absolutely necessary.", + "test": "Check if the service account used by a workload has cluster admin roles, either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges.", + "controlID": "C-0272", + "baseScore": 6.0, + "category": { + "name" : "Workload" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} diff --git a/rules/workload-with-administrative-roles/filter.rego b/rules/workload-with-administrative-roles/filter.rego new file mode 100644 index 000000000..a0037a65d --- /dev/null +++ b/rules/workload-with-administrative-roles/filter.rego @@ -0,0 +1,32 @@ +package armo_builtins + +deny[msga] { + wl := input[_] + start_of_path := get_beginning_of_path(wl) + + msga := { + "alertMessage": sprintf("%v: %v in the following namespace: %v mounts service account tokens by default", [wl.kind, wl.metadata.name, wl.metadata.namespace]), + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [wl] + }, + } +} + + +get_beginning_of_path(workload) = start_of_path { + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[workload.kind] + start_of_path := ["spec", "template", "spec"] +} + +get_beginning_of_path(workload) = start_of_path { + workload.kind == "Pod" + start_of_path := ["spec"] +} + +get_beginning_of_path(workload) = start_of_path { + workload.kind == "CronJob" + start_of_path := ["spec", "jobTemplate", "spec", "template", "spec"] +} \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/raw.rego b/rules/workload-with-administrative-roles/raw.rego new file mode 100644 index 000000000..a760b1cd6 --- /dev/null +++ b/rules/workload-with-administrative-roles/raw.rego @@ -0,0 +1,129 @@ +package armo_builtins + +import future.keywords.in + +deny[msga] { + wl := input[_] + start_of_path := get_start_of_path(wl) + wl_spec := object.get(wl, start_of_path, []) + + # get service account wl is using + sa := input[_] + sa.kind == "ServiceAccount" + is_same_sa(wl_spec, sa.metadata, wl.metadata) + + # 
check service account token is mounted + is_sa_auto_mounted(wl_spec, sa) + + # check if sa has administrative roles + role := input[_] + role.kind in ["Role", "ClusterRole"] + is_administrative_role(role) + + rolebinding := input[_] + rolebinding.kind in ["RoleBinding", "ClusterRoleBinding"] + rolebinding.roleRef.name == role.metadata.name + rolebinding.subjects[j].kind == "ServiceAccount" + rolebinding.subjects[j].name == sa.metadata.name + rolebinding.subjects[j].namespace == sa.metadata.namespace + + reviewPath := "roleRef" + deletePath := sprintf("subjects[%d]", [j]) + + msga := { + "alertMessage": sprintf("%v: %v in the following namespace: %v has administrative roles", [wl.kind, wl.metadata.name, wl.metadata.namespace]), + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [wl] + }, + "relatedObjects": [{ + "object": sa, + }, + { + "object": rolebinding, + "reviewPaths": [reviewPath], + "deletePaths": [deletePath], + }, + { + "object": role, + },] + } +} + + +get_start_of_path(workload) = start_of_path { + spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} + spec_template_spec_patterns[workload.kind] + start_of_path := ["spec", "template", "spec"] +} + +get_start_of_path(workload) = start_of_path { + workload.kind == "Pod" + start_of_path := ["spec"] +} + +get_start_of_path(workload) = start_of_path { + workload.kind == "CronJob" + start_of_path := ["spec", "jobTemplate", "spec", "template", "spec"] +} + + +is_sa_auto_mounted(wl_spec, sa) { + # automountServiceAccountToken not in pod spec + not wl_spec.automountServiceAccountToken == false + not wl_spec.automountServiceAccountToken == true + + not sa.automountServiceAccountToken == false +} + +is_sa_auto_mounted(wl_spec, sa) { + # automountServiceAccountToken set to true in pod spec + wl_spec.automountServiceAccountToken == true +} + + +is_same_sa(wl_spec, sa_metadata, wl_metadata) { + wl_spec.serviceAccountName == sa_metadata.name + is_same_namespace(sa_metadata , wl_metadata) +} + +is_same_sa(wl_spec, sa_metadata, wl_metadata) { + not wl_spec.serviceAccountName + sa_metadata.name == "default" + is_same_namespace(sa_metadata , wl_metadata) +} + +# is_same_namespace supports cases where ns is not configured in the metadata +# for yaml scans +is_same_namespace(metadata1, metadata2) { + metadata1.namespace == metadata2.namespace +} + +is_same_namespace(metadata1, metadata2) { + not metadata1.namespace + not metadata2.namespace +} + +is_same_namespace(metadata1, metadata2) { + not metadata2.namespace + metadata1.namespace == "default" +} + +is_same_namespace(metadata1, metadata2) { + not metadata1.namespace + metadata2.namespace == "default" +} + + +is_administrative_role(role){ + administrative_resources := ["*"] + administrative_verbs := ["*"] + administrative_api_groups := ["", "*"] + + administrative_rule := [rule | rule = role.rules[i] ; + rule.resources[a] in administrative_resources ; + rule.verbs[b] in administrative_verbs ; + rule.apiGroups[c] in administrative_api_groups] + count(administrative_rule) > 0 +} diff --git a/rules/workload-with-administrative-roles/rule.metadata.json b/rules/workload-with-administrative-roles/rule.metadata.json new file mode 100644 index 000000000..60fa9baf0 --- /dev/null +++ b/rules/workload-with-administrative-roles/rule.metadata.json @@ -0,0 +1,63 @@ +{ + "name": "workload-with-administrative-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], 
+ "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins" +} \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/expected.json b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/expected.json new file mode 100644 index 000000000..2145eb79b --- /dev/null +++ b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/expected.json @@ -0,0 +1,110 @@ +[ + { + "alertMessage": "Pod: test-pd in the following namespace: default has administrative roles", + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null, + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 9, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "test-pd" + } + } + ] + }, + "relatedObjects": [ + { + "object": { + "apiVersion": "v1", + "automountServiceAccountToken": true, + "kind": "ServiceAccount", + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] + }, + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null + }, + { + "object": { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRoleBinding", + "metadata": { + "name": "read-secrets-global" + }, + "roleRef": { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "name": "test" + }, + "subjects": [ + { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Group", + "name": "manager" + }, + { + "kind": "ServiceAccount", + "name": "default", + "namespace": "default" + } + ] + }, + "failedPaths": null, + "reviewPaths": [ + "roleRef" + ], + "deletePaths": [ + "subjects[1]" + ], + "fixPaths": null + }, + { + "object": { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": { + "name": "test" + }, + "rules": [ + { + "apiGroups": [ + "" + ], + "resources": [ + "pods", + "*" + ], + "verbs": [ + "create", + "*" + ] + } + ] + }, + "failedPaths": null, + "reviewPaths": null, + "deletePaths": null, + "fixPaths": null + } + ] + } +] \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/clusterrole.yaml b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/clusterrole.yaml new file mode 100644 index 000000000..630c8f34f --- /dev/null +++ b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: [""] + resources: ["pods", "*"] + verbs: ["create", "*"] \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/clusterrolebinding.yaml 
b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/clusterrolebinding.yaml new file mode 100644 index 000000000..ba2b69958 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: Group + name: manager + apiGroup: rbac.authorization.k8s.io +- kind: ServiceAccount + name: default + namespace: default +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/file.yaml b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/file.yaml new file mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/sa.json b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/fail-wl-creates-pod/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} diff --git a/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/expected.json b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/clusterrole.yaml b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/clusterrole.yaml new file mode 100644 index 000000000..54ca1a619 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/clusterrole.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: ["*"] + resources: ["secrets"] + verbs: ["*"] +- apiGroups: [""] + resources: ["*"] + verbs: ["get"] \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/clusterrolebinding.yaml b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/clusterrolebinding.yaml new file mode 100644 index 000000000..e61c4d450 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: ServiceAccount + 
name: default + namespace: default +- kind: Group + name: dev + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/file.yaml b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/file.yaml new file mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/sa.json b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-limited-permissions/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} diff --git a/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/expected.json b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/clusterrole.yaml b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/clusterrole.yaml new file mode 100644 index 000000000..6ede27070 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: ["*"] + resources: ["*", "secrets", "users"] + verbs: ["get", "*"] \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/clusterrolebinding.yaml b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/clusterrolebinding.yaml new file mode 100644 index 000000000..e1426bc28 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: Group + name: manager + apiGroup: rbac.authorization.k8s.io +- kind: Group + name: dev + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/file.yaml b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/file.yaml new file 
mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/sa.json b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-not-mount-sa-token/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} diff --git a/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/expected.json b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/cluterrole.yaml b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/cluterrole.yaml new file mode 100644 index 000000000..fd8e287be --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/cluterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test +rules: +- apiGroups: [""] + resources: ["*"] + verbs: ["*"] \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/file.yaml b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/file.yaml new file mode 100644 index 000000000..495720efa --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/file.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pd + namespace: default +spec: + automountServiceAccountToken: true + containers: + - image: k8s.gcr.io/test-webserver + name: test-container + volumeMounts: + - mountPath: /test-pd + name: test-volume + volumes: + - name: test-volume + hostPath: + path: /var diff --git a/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/rolebinding.yaml b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/rolebinding.yaml new file mode 100644 index 000000000..4448be426 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/rolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod + namespace: kube-system +subjects: +- kind: User + name: jane + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: test + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/sa.json 
b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/sa.json new file mode 100644 index 000000000..ab36c3bb1 --- /dev/null +++ b/rules/workload-with-administrative-roles/test/pass-wl-rolebinding/input/sa.json @@ -0,0 +1,17 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "automountServiceAccountToken": true, + "metadata": { + "creationTimestamp": "2022-02-07T11:21:55Z", + "name": "default", + "namespace": "default", + "resourceVersion": "410", + "uid": "5195ed3a-fa3c-46ce-8c66-32d1a83ea41f" + }, + "secrets": [ + { + "name": "default-token-sn9f8" + } + ] +} From 26900de9bdeeb6d29007b54150f3acf3bd295ac0 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 <80635572+YiscahLevySilas1@users.noreply.github.com> Date: Sun, 3 Mar 2024 15:04:37 +0200 Subject: [PATCH 123/195] Revert "Adding "create release without system tests" gh action" (#584) --- .../create-releas-without-tests.yaml | 124 ------------------ 1 file changed, 124 deletions(-) delete mode 100644 .github/workflows/create-releas-without-tests.yaml diff --git a/.github/workflows/create-releas-without-tests.yaml b/.github/workflows/create-releas-without-tests.yaml deleted file mode 100644 index 886487d91..000000000 --- a/.github/workflows/create-releas-without-tests.yaml +++ /dev/null @@ -1,124 +0,0 @@ -name: create release without system tests -on: - workflow_dispatch: - inputs: - TAG: - description: 'Tag name' - required: true - type: string - -env: - REGO_ARTIFACT_KEY_NAME: rego_artifact - REGO_ARTIFACT_PATH: release - -jobs: - # build regolibrary artifacts / test rego dependencies / test rego unit-tests - build-and-rego-test: - name: Build and test rego artifacts - runs-on: ubuntu-latest - outputs: - NEW_TAG: ${{ steps.tag-calculator.outputs.NEW_TAG }} - REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} - REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} - steps: - - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f - name: checkout repo content - with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - - - id: tag-calculator - uses: kubescape/workflows/.github/actions/tag-action@main - with: - ORIGINAL_TAG: ${{ inputs.TAG }} - SUB_STRING: "-rc" - - # Test using Golang OPA hot rule compilation - - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 - with: - go-version: '1.20' - - - name: setup python - uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa - with: - python-version: 3.10.6 - - # generating subsections ids - - name: Update frameworks subsections - run: python ./scripts/generate_subsections_ids.py - - # validate control-ID duplications - - run: python ./scripts/validations.py - - # run export script to generate regolibrary artifacts - - run: python ./scripts/export.py - - # removing release artifacts file extensions - - name: Strip Metadata Files Extensions - run: | - cd release - find -type f -name '*.json' | while read f; do mv "$f" "${f%.json}"; done - find -type f -name '*.csv' | while read f; do mv "$f" "${f%.csv}"; done - - - run: ls -laR - - - name: Set outputs - id: set_outputs - run: | - echo "REGO_ARTIFACT_KEY_NAME=${{ env.REGO_ARTIFACT_KEY_NAME }}" >> $GITHUB_OUTPUT - echo "REGO_ARTIFACT_PATH=${{ env.REGO_ARTIFACT_PATH }}" >> $GITHUB_OUTPUT - - - uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # ratchet:actions/upload-artifact@v3.1.1 - name: Upload artifact - with: - name: ${{ env.REGO_ARTIFACT_KEY_NAME }} - path: ${{ env.REGO_ARTIFACT_PATH }}/ - 
if-no-files-found: error - - # start release process - release: - if: ${{ (always() && (contains(needs.*.result, 'success')) && !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }} - name: create release and upload assets - needs: [build-and-rego-test] - runs-on: ubuntu-latest - steps: - - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2 - id: download-artifact - with: - name: ${{ env.REGO_ARTIFACT_KEY_NAME }} - path: ${{ env.REGO_ARTIFACT_PATH }} - - - name: Create Release and upload assets - id: create_release_upload_assets - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 - with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - name: Release ${{ needs.build-and-rego-test.outputs.NEW_TAG }} - tag_name: ${{ needs.build-and-rego-test.outputs.NEW_TAG }} - draft: false - fail_on_unmatched_files: true - prerelease: false - files: '${{ env.REGO_ARTIFACT_PATH }}/*' - - # Update regolibrary documentation with latest controls and rules. - update-documentation: - needs: [release] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # ratchet:actions/checkout@v3.5.2 - name: checkout repo content - - name: setup python - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # ratchet:actions/setup-python@v4.6.0 - with: - python-version: 3.8 - - name: install dependencies - run: | - python -m pip install --upgrade pip - pip install requests - - name: execute upload script - env: - README_API_KEY: ${{ secrets.README_API_KEY }} - run: |- - python ./scripts/upload-readme.py - - name: execute docs generator script - run: python ./scripts/mk-generator.py # Script to generate controls library documentation From a9bf393cd5cfae684a4ea88c765ec7cd85ceb49c Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 <80635572+YiscahLevySilas1@users.noreply.github.com> Date: Mon, 4 Mar 2024 14:34:52 +0200 Subject: [PATCH 124/195] add control C-0273 - outdated k8s version (#596) * add control C-0272 Signed-off-by: YiscahLevySilas1 * add control C-0273 Signed-off-by: YiscahLevySilas1 * add dependencies Signed-off-by: YiscahLevySilas1 --------- Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release.yaml | 4 + .github/workflows/pr-tests.yaml | 5 + controls/C-0273-outdatedk8sversion.json | 22 ++ rules/outdated-k8s-version/raw.rego | 25 ++ rules/outdated-k8s-version/rule.metadata.json | 22 ++ .../test/fail/expected.json | 38 +++ .../test/fail/input/node.json | 211 +++++++++++++ .../test/fail2/expected.json | 35 +++ .../test/fail2/input/node.json | 296 ++++++++++++++++++ .../test/pass/expected.json | 1 + .../test/pass/input/node.json | 211 +++++++++++++ scripts/validations.py | 54 ++++ 12 files changed, 924 insertions(+) create mode 100644 controls/C-0273-outdatedk8sversion.json create mode 100644 rules/outdated-k8s-version/raw.rego create mode 100644 rules/outdated-k8s-version/rule.metadata.json create mode 100644 rules/outdated-k8s-version/test/fail/expected.json create mode 100644 rules/outdated-k8s-version/test/fail/input/node.json create mode 100644 rules/outdated-k8s-version/test/fail2/expected.json create mode 100644 rules/outdated-k8s-version/test/fail2/input/node.json create mode 100644 rules/outdated-k8s-version/test/pass/expected.json create mode 100644 rules/outdated-k8s-version/test/pass/input/node.json diff --git a/.github/workflows/create-release.yaml 
b/.github/workflows/create-release.yaml index 9de029f70..413209019 100644 --- a/.github/workflows/create-release.yaml +++ b/.github/workflows/create-release.yaml @@ -63,6 +63,10 @@ jobs: uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa with: python-version: 3.10.6 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install requests # generating subsections ids - name: Update frameworks subsections diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 57a7397bb..b1b79309b 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -74,6 +74,11 @@ jobs: uses: actions/setup-python@v4 with: python-version: 3.10.6 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install requests + # validate control-ID duplications - run: python ./scripts/validations.py diff --git a/controls/C-0273-outdatedk8sversion.json b/controls/C-0273-outdatedk8sversion.json new file mode 100644 index 000000000..2e933cfab --- /dev/null +++ b/controls/C-0273-outdatedk8sversion.json @@ -0,0 +1,22 @@ +{ + "name": "Outdated Kubernetes version", + "attributes": {}, + "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", + "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", + "rulesNames": [ + "outdated-k8s-version" + ], + "long_description": "Running an outdated version of Kubernetes poses significant security risks and operational challenges. Older versions may contain unpatched vulnerabilities, leading to potential security breaches and unauthorized access. Additionally, outdated clusters might not support newer, more secure, and efficient features, impacting both performance and security. 
Regularly updating Kubernetes ensures compliance with the latest security standards and access to enhanced functionalities.", + "test": "Verifies the current Kubernetes version against the latest stable releases.", + "controlID": "C-0273", + "baseScore": 2.0, + "category": { + "name": "Control plane" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} \ No newline at end of file diff --git a/rules/outdated-k8s-version/raw.rego b/rules/outdated-k8s-version/raw.rego new file mode 100644 index 000000000..f352d0fd3 --- /dev/null +++ b/rules/outdated-k8s-version/raw.rego @@ -0,0 +1,25 @@ +package armo_builtins + +import future.keywords.every + +deny[msga] { + node := input[_] + node.kind == "Node" + current_version := node.status.nodeInfo.kubeletVersion + has_outdated_version(current_version) + path := "status.nodeInfo.kubeletVersion" + msga := { + "alertMessage": sprintf("Your kubelet version: %s, in node: %s is outdated", [current_version, node.metadata.name]), + "reviewPaths": [path], + "alertObject": {"k8SApiObjects": [node]}, + } +} + + +has_outdated_version(version) { + # the `supported_k8s_versions` is validated in the validations script against "https://api.github.com/repos/kubernetes/kubernetes/releases" + supported_k8s_versions := ["v1.29", "v1.28", "v1.27"] + every v in supported_k8s_versions{ + not startswith(version, v) + } +} diff --git a/rules/outdated-k8s-version/rule.metadata.json b/rules/outdated-k8s-version/rule.metadata.json new file mode 100644 index 000000000..4efa6cf6a --- /dev/null +++ b/rules/outdated-k8s-version/rule.metadata.json @@ -0,0 +1,22 @@ +{ + "name": "outdated-k8s-version", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins" +} \ No newline at end of file diff --git a/rules/outdated-k8s-version/test/fail/expected.json b/rules/outdated-k8s-version/test/fail/expected.json new file mode 100644 index 000000000..4191e8669 --- /dev/null +++ b/rules/outdated-k8s-version/test/fail/expected.json @@ -0,0 +1,38 @@ +[ + { + "alertMessage": "Your kubelet version: v1.20.7, in node: minikube is outdated", + "failedPaths": null, + "reviewPaths": [ + "status.nodeInfo.kubeletVersion" + ], + "deletePaths": null, + "fixPaths": null, + "ruleStatus": "", + "packagename": "", + "alertScore": 0, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube", + "kubernetes.io/os": "linux", + "minikube.k8s.io/commit": "76d74191d82c47883dc7e1319ef7cebd3e00ee11", + "minikube.k8s.io/name": "minikube", + "minikube.k8s.io/updated_at": "2022_01_03T11_57_45_0700", + "minikube.k8s.io/version": "v1.21.0", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "" + }, + "name": "minikube" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/outdated-k8s-version/test/fail/input/node.json b/rules/outdated-k8s-version/test/fail/input/node.json new file mode 100644 index 000000000..15ef17826 --- /dev/null +++ b/rules/outdated-k8s-version/test/fail/input/node.json @@ -0,0 +1,211 @@ +{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + 
"node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-01-03T09:57:41Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube", + "kubernetes.io/os": "linux", + "minikube.k8s.io/commit": "76d74191d82c47883dc7e1319ef7cebd3e00ee11", + "minikube.k8s.io/name": "minikube", + "minikube.k8s.io/updated_at": "2022_01_03T11_57_45_0700", + "minikube.k8s.io/version": "v1.21.0", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "" + }, + "name": "minikube", + "resourceVersion": "33341", + "uid": "6b3a6670-92aa-41b5-a8a7-a96372b4986b" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": [ + "10.244.0.0/24" + ] + }, + "status": { + "addresses": [ + { + "address": "192.168.49.2", + "type": "InternalIP" + }, + { + "address": "minikube", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "4", + "ephemeral-storage": "92563096Ki", + "hugepages-2Mi": "0", + "memory": "10486240Ki", + "pods": "110" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "92563096Ki", + "hugepages-2Mi": "0", + "memory": "10486240Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-01-05T07:47:39Z", + "lastTransitionTime": "2022-01-03T09:57:39Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-01-05T07:47:39Z", + "lastTransitionTime": "2022-01-03T09:57:39Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-01-05T07:47:39Z", + "lastTransitionTime": "2022-01-03T09:57:39Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-01-05T07:47:39Z", + "lastTransitionTime": "2022-01-03T09:58:00Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [ + { + "names": [ + "influxdb@sha256:1a48c5c4b957b795cdf381bcf91e0d7de9edea2d9be984afbd6e4922e2e24484", + "influxdb:latest" + ], + "sizeBytes": 345902306 + }, + { + "names": [ + "k8s.gcr.io/etcd@sha256:4ad90a11b55313b182afc186b9876c8e891531b8db4c9bf1541953021618d0e2", + "k8s.gcr.io/etcd:3.4.13-0" + ], + "sizeBytes": 253392289 + }, + { + "names": [ + "kubernetesui/dashboard@sha256:7f80b5ba141bead69c4fee8661464857af300d7d7ed0274cf7beecedc00322e6", + "kubernetesui/dashboard:v2.1.0" + ], + "sizeBytes": 225733746 + }, + { + "names": [ + "k8s.gcr.io/kube-apiserver@sha256:5ab3d676c426bfb272fb7605e6978b90d5676913636a6105688862849961386f", + "k8s.gcr.io/kube-apiserver:v1.20.7" + ], + "sizeBytes": 121762183 + }, + { + "names": [ + "k8s.gcr.io/kube-proxy@sha256:5d2be61150535ed37b7a5fa5a8239f89afee505ab2fae05247447851eed710a8", + "k8s.gcr.io/kube-proxy:v1.20.7" + ], + "sizeBytes": 118396107 + }, + { + "names": [ + "k8s.gcr.io/kube-controller-manager@sha256:eb9b121cbe40cf9016b95cefd34fb9e62c4caf1516188a98b64f091d871a2d46", + "k8s.gcr.io/kube-controller-manager:v1.20.7" + ], + "sizeBytes": 116298119 + }, + { + "names": [ + "nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d", + "nginx:1.14.2" + ], + 
"sizeBytes": 109129446 + }, + { + "names": [ + "k8s.gcr.io/kube-scheduler@sha256:6fdb12580353b6cd59de486ca650e3ba9270bc8d52f1d3052cd9bb1d4f28e189", + "k8s.gcr.io/kube-scheduler:v1.20.7" + ], + "sizeBytes": 47268231 + }, + { + "names": [ + "k8s.gcr.io/coredns@sha256:73ca82b4ce829766d4f1f10947c3a338888f876fbed0540dc849c89ff256e90c", + "k8s.gcr.io/coredns:1.7.0" + ], + "sizeBytes": 45227747 + }, + { + "names": [ + "kubernetesui/metrics-scraper@sha256:555981a24f184420f3be0c79d4efb6c948a85cfce84034f85a563f4151a81cbf", + "kubernetesui/metrics-scraper:v1.0.4" + ], + "sizeBytes": 36937728 + }, + { + "names": [ + "gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", + "gcr.io/k8s-minikube/storage-provisioner:v5" + ], + "sizeBytes": 31465472 + }, + { + "names": [ + "k8s.gcr.io/goproxy@sha256:5334c7ad43048e3538775cb09aaf184f5e8acf4b0ea60e3bc8f1d93c209865a5", + "k8s.gcr.io/goproxy:0.1" + ], + "sizeBytes": 5489816 + }, + { + "names": [ + "k8s.gcr.io/test-webserver@sha256:f63e365c13646f231ec4a16791c6133ddd7b80fcd1947f41ab193968e02b0745", + "k8s.gcr.io/test-webserver:latest" + ], + "sizeBytes": 4534272 + }, + { + "names": [ + "k8s.gcr.io/pause@sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f", + "k8s.gcr.io/pause:3.2" + ], + "sizeBytes": 682696 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "85cb3c8a-7d8e-4885-9a9c-e8a340332f21", + "containerRuntimeVersion": "docker://20.10.7", + "kernelVersion": "5.11.0-43-generic", + "kubeProxyVersion": "v1.20.7", + "kubeletVersion": "v1.20.7", + "machineID": "b77ec962e3734760b1e756ffc5e83152", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.2 LTS", + "systemUUID": "5bc452e6-12eb-404d-a17e-7df23ff82f57" + } + } +} diff --git a/rules/outdated-k8s-version/test/fail2/expected.json b/rules/outdated-k8s-version/test/fail2/expected.json new file mode 100644 index 000000000..ba3ba0217 --- /dev/null +++ b/rules/outdated-k8s-version/test/fail2/expected.json @@ -0,0 +1,35 @@ +[ + { + "alertMessage": "Your kubelet version: v1.25.3, in node: attack-chain-6-control-plane is outdated", + "failedPaths": null, + "reviewPaths": [ + "status.nodeInfo.kubeletVersion" + ], + "deletePaths": null, + "fixPaths": null, + "ruleStatus": "", + "packagename": "", + "alertScore": 0, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "ingress-ready": "true", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "attack-chain-6-control-plane", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/control-plane": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "name": "attack-chain-6-control-plane" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/outdated-k8s-version/test/fail2/input/node.json b/rules/outdated-k8s-version/test/fail2/input/node.json new file mode 100644 index 000000000..f4404ec98 --- /dev/null +++ b/rules/outdated-k8s-version/test/fail2/input/node.json @@ -0,0 +1,296 @@ +{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "unix:///run/containerd/containerd.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2024-02-20T11:17:49Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + 
"ingress-ready": "true", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "attack-chain-6-control-plane", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/control-plane": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "name": "attack-chain-6-control-plane", + "resourceVersion": "291629", + "uid": "7102f06c-ec50-4150-a962-83f5e35b7d9d" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": [ + "10.244.0.0/24" + ], + "providerID": "kind://docker/attack-chain-6/attack-chain-6-control-plane" + }, + "status": { + "addresses": [ + { + "address": "172.18.0.2", + "type": "InternalIP" + }, + { + "address": "attack-chain-6-control-plane", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "8", + "ephemeral-storage": "486903968Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16081500Ki", + "pods": "110" + }, + "capacity": { + "cpu": "8", + "ephemeral-storage": "486903968Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16081500Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2024-03-04T09:45:05Z", + "lastTransitionTime": "2024-02-20T11:17:46Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2024-03-04T09:45:05Z", + "lastTransitionTime": "2024-02-20T11:17:46Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2024-03-04T09:45:05Z", + "lastTransitionTime": "2024-02-20T11:17:46Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2024-03-04T09:45:05Z", + "lastTransitionTime": "2024-02-20T11:18:12Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [ + { + "names": [ + "docker.io/library/wordpress@sha256:5f1873a461105cb1dc1a75731671125f1fb406b18e3fcf63210e8f7f84ce560b", + "docker.io/library/wordpress:6.0.1-php7.4" + ], + "sizeBytes": 214624632 + }, + { + "names": [ + "docker.io/library/mysql@sha256:ff5ab9cdce0b4c59704b4e2a09deed5ab8467be795e0ea20228b8528f53fcf82", + "docker.io/library/mysql:oracle" + ], + "sizeBytes": 183413119 + }, + { + "names": [ + "registry.k8s.io/etcd:3.5.4-0" + ], + "sizeBytes": 102157811 + }, + { + "names": [ + "docker.io/bitnami/kubectl@sha256:15f8664618ec2efea467067e86591b876eef0ef84f1ad09e15aab5ca5bc441fb", + "docker.io/bitnami/kubectl:1.27.6" + ], + "sizeBytes": 80620317 + }, + { + "names": [ + "docker.io/library/import-2022-11-02@sha256:975ab6838f71ba7f8ed71ea319124bc7adbdc41a639e329e76d0beda84d5c193", + "registry.k8s.io/kube-apiserver:v1.25.3" + ], + "sizeBytes": 76530158 + }, + { + "names": [ + "quay.io/kubescape/kubescape@sha256:9ccc948e83b22cd3fc6919b4e3e44536530cc9426a13b8d5e07bf3b2bd1b0f22", + "quay.io/kubescape/kubescape:v3.0.3" + ], + "sizeBytes": 71122909 + }, + { + "names": [ + "docker.io/library/import-2022-11-02@sha256:ea11577bfe5c64a2c95b291596042b878dcc627903e3d9e3734c51b0fc019af1", + "registry.k8s.io/kube-controller-manager:v1.25.3" + ], + "sizeBytes": 64499324 + }, + { + "names": [ + "docker.io/library/import-2022-11-02@sha256:a8333982f3c16667801faa12373e05cc7024e0182e4d034679b83e4911d1fdd1", + "registry.k8s.io/kube-proxy:v1.25.3" 
+ ], + "sizeBytes": 63273981 + }, + { + "names": [ + "docker.io/library/import-2022-11-02@sha256:2a436be2b9f9f6973775123308760aeb44edaeda099374efd3ddab5a91812121", + "registry.k8s.io/kube-scheduler:v1.25.3" + ], + "sizeBytes": 51920508 + }, + { + "names": [ + "quay.io/kubescape/kubevuln@sha256:94cbbb94f8d6bdf2529d5f9c5279ac4c7411182f4e8e5a3d0b5e8f10a465f73a", + "quay.io/kubescape/kubevuln:v0.3.2" + ], + "sizeBytes": 51702904 + }, + { + "names": [ + "quay.io/kubescape/storage@sha256:9b712b34dbc38cc40a212e7857cac5ce3880624bd7afdd34fb7ff89867cbfaaa", + "quay.io/kubescape/storage:v0.0.69" + ], + "sizeBytes": 43160553 + }, + { + "names": [ + "quay.io/kubescape/storage@sha256:b6ecc63dc4e16e1ae395c9bde571e39665166c5cc30d57f4f2dcb20cffac6fa7", + "quay.io/kubescape/storage:v0.0.67" + ], + "sizeBytes": 43122123 + }, + { + "names": [ + "quay.io/kubescape/synchronizer@sha256:205b3a3ea5f68ea537c820b353baa542ca126aa8223b76cbc8396e581698eaa4", + "quay.io/kubescape/synchronizer:v0.0.59" + ], + "sizeBytes": 39717995 + }, + { + "names": [ + "quay.io/kubescape/node-agent@sha256:c15f198440e20d404dcb1eed4efed1393b8871c09b31fb49c9a0eb335ad7097c", + "quay.io/kubescape/node-agent:v0.2.12" + ], + "sizeBytes": 38501567 + }, + { + "names": [ + "quay.io/kubescape/node-agent@sha256:77a965ea2abffdd6b4a6988db7c7e009912e8bea1cb2c05f31c0bd74daf50c10", + "quay.io/kubescape/node-agent:v0.2.10" + ], + "sizeBytes": 38455760 + }, + { + "names": [ + "quay.io/kubescape/operator@sha256:dd2adac214bae06915d9b2b5b383212f8dae463d7fe4e56cb26b20f88cb623ee", + "quay.io/kubescape/operator:v0.2.4" + ], + "sizeBytes": 37683286 + }, + { + "names": [ + "quay.io/kubescape/gateway@sha256:f3852c3deb8838d4891cfa63f6d266fbe0daed34152219f1f5e970bbb4e35b1e", + "quay.io/kubescape/gateway:v0.1.20" + ], + "sizeBytes": 29748754 + }, + { + "names": [ + "docker.io/otel/opentelemetry-collector@sha256:92f6e2efd014152bee26f8324e3a511980b512a36d8793d3fee708715caaa6c0", + "docker.io/otel/opentelemetry-collector:0.92.0" + ], + "sizeBytes": 28355004 + }, + { + "names": [ + "docker.io/kindest/kindnetd:v20221004-44d545d1" + ], + "sizeBytes": 25830582 + }, + { + "names": [ + "docker.io/kindest/local-path-provisioner:v0.0.22-kind.0" + ], + "sizeBytes": 17375346 + }, + { + "names": [ + "quay.io/kubescape/kollector@sha256:da216606a706e97a3456a3c2f3eee380db9579de3140a5f26623febe4ca4e6c4", + "quay.io/kubescape/kollector:v0.1.33" + ], + "sizeBytes": 16993483 + }, + { + "names": [ + "registry.k8s.io/coredns/coredns:v1.9.3" + ], + "sizeBytes": 14837849 + }, + { + "names": [ + "quay.io/kubescape/http-request@sha256:42e1d32255ad77cf980e5edfafaa1ee2688c217b67ac50b218e909bc5bb39276", + "quay.io/kubescape/http-request:v0.2.2" + ], + "sizeBytes": 7339246 + }, + { + "names": [ + "quay.io/kubescape/http-request@sha256:4b5f47715f2daefd4eb6265d410588bcda90e97a0588383f7b0904cac9baea26", + "quay.io/kubescape/http-request:v0.0.14" + ], + "sizeBytes": 6489710 + }, + { + "names": [ + "quay.io/kubescape/host-scanner@sha256:89fe7df48898769110dc6fb96050c3a8f58dd8d8dbc795b21471bb68148516f2", + "quay.io/kubescape/host-scanner:v1.0.66" + ], + "sizeBytes": 6472151 + }, + { + "names": [ + "docker.io/kindest/local-path-helper:v20220607-9a4d8d2a" + ], + "sizeBytes": 2859509 + }, + { + "names": [ + "registry.k8s.io/pause:3.7" + ], + "sizeBytes": 311278 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "a0a0427b-40ea-4cf5-9f24-53d6e200ef2d", + "containerRuntimeVersion": "containerd://1.6.9", + "kernelVersion": "6.5.0-21-generic", + "kubeProxyVersion": "v1.25.3", + "kubeletVersion": 
"v1.25.3", + "machineID": "9cbf8e2fbf5540cd8ff218ef016ba690", + "operatingSystem": "linux", + "osImage": "Ubuntu 22.04.1 LTS", + "systemUUID": "3f39a350-3cd4-400f-875d-c270379817b0" + } + } +} diff --git a/rules/outdated-k8s-version/test/pass/expected.json b/rules/outdated-k8s-version/test/pass/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/outdated-k8s-version/test/pass/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/outdated-k8s-version/test/pass/input/node.json b/rules/outdated-k8s-version/test/pass/input/node.json new file mode 100644 index 000000000..ee12e6144 --- /dev/null +++ b/rules/outdated-k8s-version/test/pass/input/node.json @@ -0,0 +1,211 @@ +{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-01-03T09:57:41Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube", + "kubernetes.io/os": "linux", + "minikube.k8s.io/commit": "76d74191d82c47883dc7e1319ef7cebd3e00ee11", + "minikube.k8s.io/name": "minikube", + "minikube.k8s.io/updated_at": "2022_01_03T11_57_45_0700", + "minikube.k8s.io/version": "v1.21.0", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "" + }, + "name": "minikube", + "resourceVersion": "33341", + "uid": "6b3a6670-92aa-41b5-a8a7-a96372b4986b" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": [ + "10.244.0.0/24" + ] + }, + "status": { + "addresses": [ + { + "address": "192.168.49.2", + "type": "InternalIP" + }, + { + "address": "minikube", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "4", + "ephemeral-storage": "92563096Ki", + "hugepages-2Mi": "0", + "memory": "10486240Ki", + "pods": "110" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "92563096Ki", + "hugepages-2Mi": "0", + "memory": "10486240Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-01-05T07:47:39Z", + "lastTransitionTime": "2022-01-03T09:57:39Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-01-05T07:47:39Z", + "lastTransitionTime": "2022-01-03T09:57:39Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-01-05T07:47:39Z", + "lastTransitionTime": "2022-01-03T09:57:39Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-01-05T07:47:39Z", + "lastTransitionTime": "2022-01-03T09:58:00Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [ + { + "names": [ + "influxdb@sha256:1a48c5c4b957b795cdf381bcf91e0d7de9edea2d9be984afbd6e4922e2e24484", + "influxdb:latest" + ], + "sizeBytes": 345902306 + }, + { + "names": [ + "k8s.gcr.io/etcd@sha256:4ad90a11b55313b182afc186b9876c8e891531b8db4c9bf1541953021618d0e2", + "k8s.gcr.io/etcd:3.4.13-0" + ], + "sizeBytes": 253392289 + }, + { + "names": 
[ + "kubernetesui/dashboard@sha256:7f80b5ba141bead69c4fee8661464857af300d7d7ed0274cf7beecedc00322e6", + "kubernetesui/dashboard:v2.1.0" + ], + "sizeBytes": 225733746 + }, + { + "names": [ + "k8s.gcr.io/kube-apiserver@sha256:5ab3d676c426bfb272fb7605e6978b90d5676913636a6105688862849961386f", + "k8s.gcr.io/kube-apiserver:v1.28.6" + ], + "sizeBytes": 121762183 + }, + { + "names": [ + "k8s.gcr.io/kube-proxy@sha256:5d2be61150535ed37b7a5fa5a8239f89afee505ab2fae05247447851eed710a8", + "k8s.gcr.io/kube-proxy:v1.28.6" + ], + "sizeBytes": 118396107 + }, + { + "names": [ + "k8s.gcr.io/kube-controller-manager@sha256:eb9b121cbe40cf9016b95cefd34fb9e62c4caf1516188a98b64f091d871a2d46", + "k8s.gcr.io/kube-controller-manager:v1.28.6" + ], + "sizeBytes": 116298119 + }, + { + "names": [ + "nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d", + "nginx:1.14.2" + ], + "sizeBytes": 109129446 + }, + { + "names": [ + "k8s.gcr.io/kube-scheduler@sha256:6fdb12580353b6cd59de486ca650e3ba9270bc8d52f1d3052cd9bb1d4f28e189", + "k8s.gcr.io/kube-scheduler:v1.28.6" + ], + "sizeBytes": 47268231 + }, + { + "names": [ + "k8s.gcr.io/coredns@sha256:73ca82b4ce829766d4f1f10947c3a338888f876fbed0540dc849c89ff256e90c", + "k8s.gcr.io/coredns:1.7.0" + ], + "sizeBytes": 45227747 + }, + { + "names": [ + "kubernetesui/metrics-scraper@sha256:555981a24f184420f3be0c79d4efb6c948a85cfce84034f85a563f4151a81cbf", + "kubernetesui/metrics-scraper:v1.0.4" + ], + "sizeBytes": 36937728 + }, + { + "names": [ + "gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", + "gcr.io/k8s-minikube/storage-provisioner:v5" + ], + "sizeBytes": 31465472 + }, + { + "names": [ + "k8s.gcr.io/goproxy@sha256:5334c7ad43048e3538775cb09aaf184f5e8acf4b0ea60e3bc8f1d93c209865a5", + "k8s.gcr.io/goproxy:0.1" + ], + "sizeBytes": 5489816 + }, + { + "names": [ + "k8s.gcr.io/test-webserver@sha256:f63e365c13646f231ec4a16791c6133ddd7b80fcd1947f41ab193968e02b0745", + "k8s.gcr.io/test-webserver:latest" + ], + "sizeBytes": 4534272 + }, + { + "names": [ + "k8s.gcr.io/pause@sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f", + "k8s.gcr.io/pause:3.2" + ], + "sizeBytes": 682696 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "85cb3c8a-7d8e-4885-9a9c-e8a340332f21", + "containerRuntimeVersion": "docker://20.10.7", + "kernelVersion": "5.11.0-43-generic", + "kubeProxyVersion": "v1.28.6", + "kubeletVersion": "v1.28.6", + "machineID": "b77ec962e3734760b1e756ffc5e83152", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.2 LTS", + "systemUUID": "5bc452e6-12eb-404d-a17e-7df23ff82f57" + } + } +} diff --git a/scripts/validations.py b/scripts/validations.py index ede55984d..abfe74564 100644 --- a/scripts/validations.py +++ b/scripts/validations.py @@ -1,6 +1,7 @@ import json import os import re +import requests FRAMEWORK_DIR = "frameworks" CONTROLS_DIR = "controls" @@ -10,6 +11,7 @@ CONTROLID_TO_FILENAME = {} RULENAME_TO_RULE_DIR = {} ATTACK_TRACKS_DICT = {} +k8s_RELEASE_URL = "https://api.github.com/repos/kubernetes/kubernetes/releases" def ignore_file(file_name: str): return file_name.startswith('__') @@ -152,6 +154,57 @@ def validate_rules(): data = json.load(rule_file) assert data["name"] in RULES_CHECKED, f"rule {data['name']} is not used by any control" +def get_kubernetes_supported_versions(): + try: + response = requests.get(k8s_RELEASE_URL) + response.raise_for_status() + except requests.RequestException as e: + raise Exception("Failed to fetch Kubernetes releases") 
from e + + releases = response.json() + supported_versions = [] + for release in releases: + if not release['draft'] and not release['prerelease']: + tag_name = release['tag_name'] + if all(x not in tag_name for x in ['alpha', 'beta', 'rc']): + major_minor_version = '.'.join(tag_name.lstrip('v').split('.')[:2]) + if major_minor_version not in supported_versions: + supported_versions.append(major_minor_version) + if len(supported_versions) == 3: + break + + if not supported_versions: + raise Exception("No supported Kubernetes versions found.") + return supported_versions + +def validate_k8s_supported_versions_in_rego(): + # Step 1: Get the latest supported Kubernetes versions + api_versions = get_kubernetes_supported_versions() + + # Step 2 & 3: Check the Rego file and compare + # Read the rego file + file_path = os.path.join("rules/outdated-k8s-version/raw.rego") + try: + with open(file_path, 'r') as file: + rego_content = file.read() + except FileNotFoundError: + raise Exception(f"File {file_path} not found.") + + # Extract the currently supported versions from the file + versions_pattern = re.compile(r'supported_k8s_versions := \["(v[0-9]+\.[0-9]+)", "(v[0-9]+\.[0-9]+)", "(v[0-9]+\.[0-9]+)"\]') + match = versions_pattern.search(rego_content) + if not match: + raise Exception("Could not find the supported Kubernetes versions in the Rego file.") + + file_versions = list(match.groups()) + # Format the API versions to match the Rego file format + formatted_api_versions = ['v' + version for version in api_versions] + + # Compare the versions from the API with those in the file + if set(formatted_api_versions) != set(file_versions): + raise Exception(f"The Rego file's (outdated-k8s-version/raw.rego) supported Kubernetes versions: {file_versions} do not match the latest Kubernetes supported versions: {formatted_api_versions} from {k8s_RELEASE_URL}. 
Please update the Rego file: rules/outdated-k8s-version/raw.rego") + else: + print("The rule: outdated-k8s-version/raw.rego contains the correct latest supported Kubernetes versions.") if __name__ == "__main__": fill_rulename_to_rule_dir() @@ -160,3 +213,4 @@ def validate_rules(): validate_controls_in_framework() validate_controls() validate_rules() + validate_k8s_supported_versions_in_rego() From a8885651165fb8dea2b21164c5defd2e05c49388 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 4 Mar 2024 15:57:02 +0200 Subject: [PATCH 125/195] add workflow to create release without system tests Signed-off-by: YiscahLevySilas1 --- .../workflows/create-release-without-st.yaml | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 .github/workflows/create-release-without-st.yaml diff --git a/.github/workflows/create-release-without-st.yaml b/.github/workflows/create-release-without-st.yaml new file mode 100644 index 000000000..34f9f1597 --- /dev/null +++ b/.github/workflows/create-release-without-st.yaml @@ -0,0 +1,124 @@ +name: create release without system tests +on: + workflow_dispatch: + inputs: + TAG: + description: 'Tag name' + required: true + type: string + +env: + REGO_ARTIFACT_KEY_NAME: rego_artifact + REGO_ARTIFACT_PATH: release + +jobs: + # build regolibrary artifacts / test rego dependencies / test rego unit-tests + build-and-rego-test: + name: Build and test rego artifacts + runs-on: ubuntu-latest + outputs: + NEW_TAG: ${{ steps.tag-calculator.outputs.NEW_TAG }} + REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} + REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} + steps: + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f + name: checkout repo content + with: + token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + + - id: tag-calculator + uses: kubescape/workflows/.github/actions/tag-action@main + with: + ORIGINAL_TAG: ${{ inputs.TAG }} + SUB_STRING: "-rc" + + # Test using Golang OPA hot rule compilation + - name: Set up Go + uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 + with: + go-version: '1.20' + + - name: setup python + uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa + with: + python-version: 3.10.6 + + # generating subsections ids + - name: Update frameworks subsections + run: python ./scripts/generate_subsections_ids.py + + # validate control-ID duplications + - run: python ./scripts/validations.py + + # run export script to generate regolibrary artifacts + - run: python ./scripts/export.py + + # removing release artifacts file extensions + - name: Strip Metadata Files Extensions + run: | + cd release + find -type f -name '*.json' | while read f; do mv "$f" "${f%.json}"; done + find -type f -name '*.csv' | while read f; do mv "$f" "${f%.csv}"; done + + - run: ls -laR + + - name: Set outputs + id: set_outputs + run: | + echo "REGO_ARTIFACT_KEY_NAME=${{ env.REGO_ARTIFACT_KEY_NAME }}" >> $GITHUB_OUTPUT + echo "REGO_ARTIFACT_PATH=${{ env.REGO_ARTIFACT_PATH }}" >> $GITHUB_OUTPUT + + - uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # ratchet:actions/upload-artifact@v3.1.1 + name: Upload artifact + with: + name: ${{ env.REGO_ARTIFACT_KEY_NAME }} + path: ${{ env.REGO_ARTIFACT_PATH }}/ + if-no-files-found: error + + # start release process + release: + if: ${{ (always() && (contains(needs.*.result, 'success')) && !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 
'cancelled'))) }} + name: create release and upload assets + needs: [build-and-rego-test] + runs-on: ubuntu-latest + steps: + - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2 + id: download-artifact + with: + name: ${{ env.REGO_ARTIFACT_KEY_NAME }} + path: ${{ env.REGO_ARTIFACT_PATH }} + + - name: Create Release and upload assets + id: create_release_upload_assets + uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 + with: + token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + name: Release ${{ needs.build-and-rego-test.outputs.NEW_TAG }} + tag_name: ${{ needs.build-and-rego-test.outputs.NEW_TAG }} + draft: false + fail_on_unmatched_files: true + prerelease: false + files: '${{ env.REGO_ARTIFACT_PATH }}/*' + + # Update regolibrary documentation with latest controls and rules. + update-documentation: + needs: [release] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # ratchet:actions/checkout@v3.5.2 + name: checkout repo content + - name: setup python + uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # ratchet:actions/setup-python@v4.6.0 + with: + python-version: 3.8 + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install requests + - name: execute upload script + env: + README_API_KEY: ${{ secrets.README_API_KEY }} + run: |- + python ./scripts/upload-readme.py + - name: execute docs generator script + run: python ./scripts/mk-generator.py # Script to generate controls library documentation \ No newline at end of file From 9fc81b710fcb309176541bf838a7160dc5ecd3f0 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 4 Mar 2024 16:40:13 +0200 Subject: [PATCH 126/195] add dependencies Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release-without-st.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/create-release-without-st.yaml b/.github/workflows/create-release-without-st.yaml index 34f9f1597..ef2f6330d 100644 --- a/.github/workflows/create-release-without-st.yaml +++ b/.github/workflows/create-release-without-st.yaml @@ -42,6 +42,10 @@ jobs: uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa with: python-version: 3.10.6 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install requests # generating subsections ids - name: Update frameworks subsections From ab5c9f6df690946f4981f7270e1ecf9270fd0930 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 5 Mar 2024 09:43:25 +0200 Subject: [PATCH 127/195] add new attack paths for security risks Signed-off-by: YiscahLevySilas1 --- .../external-wl-unauthenticated.json | 20 +++++++++++++++++++ ...ternal-wl-with-cluster-takeover-roles.json | 20 +++++++++++++++++++ controls/C-0256-exposuretointernet.json | 12 +++++++++++ ...0267-workloadwithclustertakeoverroles.json | 14 ++++++++++++- frameworks/security.json | 18 +++++++++++++++++ 5 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 attack-tracks/external-wl-unauthenticated.json create mode 100644 attack-tracks/external-wl-with-cluster-takeover-roles.json diff --git a/attack-tracks/external-wl-unauthenticated.json b/attack-tracks/external-wl-unauthenticated.json new file mode 100644 index 000000000..a09fc9ac6 --- /dev/null +++ b/attack-tracks/external-wl-unauthenticated.json @@ -0,0 +1,20 @@ +{ + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + 
"name": "external-database-without-authentication" + }, + "spec": { + "version": "1.0", + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Unauthenticated Access", + "description": "An unauthenticated attacker can access resources." + } + ] + } + } +} \ No newline at end of file diff --git a/attack-tracks/external-wl-with-cluster-takeover-roles.json b/attack-tracks/external-wl-with-cluster-takeover-roles.json new file mode 100644 index 000000000..d05a6fe0d --- /dev/null +++ b/attack-tracks/external-wl-with-cluster-takeover-roles.json @@ -0,0 +1,20 @@ +{ + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "external-workload-with-cluster-takeover-roles" + }, + "spec": { + "version": "1.0", + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Cluster/Resources Access", + "description": "An attacker has access to sensitive information and can leverage them by creating pods in the cluster." + } + ] + } + } +} \ No newline at end of file diff --git a/controls/C-0256-exposuretointernet.json b/controls/C-0256-exposuretointernet.json index c82bd03c0..044528cc5 100644 --- a/controls/C-0256-exposuretointernet.json +++ b/controls/C-0256-exposuretointernet.json @@ -16,6 +16,18 @@ "categories": [ "Initial Access" ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-database-without-authentication", + "categories": [ + "Initial Access" + ] } ] }, diff --git a/controls/C-0267-workloadwithclustertakeoverroles.json b/controls/C-0267-workloadwithclustertakeoverroles.json index 0376ae36a..5a7cc5994 100644 --- a/controls/C-0267-workloadwithclustertakeoverroles.json +++ b/controls/C-0267-workloadwithclustertakeoverroles.json @@ -1,6 +1,18 @@ { "name": "Workload with cluster takeover roles", - "attributes": {}, + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + } + ] + }, "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", "remediation": "You should apply least privilege principle. 
Make sure each service account has only the permissions that are absolutely necessary.", "rulesNames": [ diff --git a/frameworks/security.json b/frameworks/security.json index 2840c9c17..a2928c2b4 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -182,6 +182,12 @@ "name": "Authenticated user has sensitive permissions" } }, + { + "controlID": "C-0267", + "patch": { + "name": "Workload with cluster takeover roles" + } + }, { "controlID": "C-0270", "patch": { @@ -193,6 +199,18 @@ "patch": { "name": "Ensure memory limits are set" } + }, + { + "controlID": "C-0272", + "patch": { + "name": "Workload with administrative roles" + } + }, + { + "controlID": "C-0273", + "patch": { + "name": "Outdated Kubernetes version" + } } ] } \ No newline at end of file From 024d59d06bdc63cc72eacfe00afe5426bef60bf4 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 5 Mar 2024 09:50:01 +0200 Subject: [PATCH 128/195] change node name Signed-off-by: YiscahLevySilas1 --- attack-tracks/external-wl-with-cluster-takeover-roles.json | 2 +- controls/C-0267-workloadwithclustertakeoverroles.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/attack-tracks/external-wl-with-cluster-takeover-roles.json b/attack-tracks/external-wl-with-cluster-takeover-roles.json index d05a6fe0d..d12d0a139 100644 --- a/attack-tracks/external-wl-with-cluster-takeover-roles.json +++ b/attack-tracks/external-wl-with-cluster-takeover-roles.json @@ -11,7 +11,7 @@ "description": "An attacker can access the Kubernetes environment.", "subSteps": [ { - "name": "Cluster/Resources Access", + "name": "Cluster Access", "description": "An attacker has access to sensitive information and can leverage them by creating pods in the cluster." } ] diff --git a/controls/C-0267-workloadwithclustertakeoverroles.json b/controls/C-0267-workloadwithclustertakeoverroles.json index 5a7cc5994..c9dc96379 100644 --- a/controls/C-0267-workloadwithclustertakeoverroles.json +++ b/controls/C-0267-workloadwithclustertakeoverroles.json @@ -8,7 +8,7 @@ { "attackTrack": "external-workload-with-cluster-takeover-roles", "categories": [ - "Initial Access" + "Cluster Access" ] } ] From 08f33d412cc05b977d0fb9e5cb5c2b8f679c21ff Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 6 Mar 2024 12:20:31 +0200 Subject: [PATCH 129/195] update default sensitiveKeyNames list Signed-off-by: YiscahLevySilas1 --- default-config-inputs.json | 3 --- 1 file changed, 3 deletions(-) diff --git a/default-config-inputs.json b/default-config-inputs.json index 699fa3127..a0bd9ee1f 100644 --- a/default-config-inputs.json +++ b/default-config-inputs.json @@ -50,11 +50,8 @@ "max_critical_vulnerabilities": ["5"], "max_high_vulnerabilities": ["10"], "sensitiveKeyNames": [ - "aws_access_key_id", "aws_secret_access_key", - "azure_batchai_storage_account", "azure_batchai_storage_key", - "azure_batch_account", "azure_batch_key", "secret", "key", From 74ef65b37ec8782e99c3e6de7b6e90031acaab15 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 6 Mar 2024 16:28:18 +0200 Subject: [PATCH 130/195] add releaseDev Signed-off-by: YiscahLevySilas1 --- ...0267-workloadwithclustertakeoverroles.json | 4 +- gitregostore/gitstoremethods.go | 14 + go.mod | 150 +- go.sum | 662 +- releaseDev/ControlID_RuleName.csv | 297 + releaseDev/FWName_CID_CName.csv | 475 + releaseDev/allcontrols.json | 4656 ++++++ releaseDev/armobest.json | 3066 ++++ releaseDev/attack_tracks.json | 109 + releaseDev/cis-aks-t1.2.0.json | 4282 ++++++ releaseDev/cis-eks-t1.2.0.json | 4456 ++++++ 
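
The attack-track files and the control changes in the patches above are linked only by matching strings: each entry in a control's attributes.attackTracks must name an existing attack track (its metadata.name) and, in categories, step names defined by that track. The sketch below is a hypothetical consistency check in the spirit of scripts/validations.py, not part of the repository's scripts; it loads one track and one control from the paths introduced above and verifies that every claimed category exists as a step in the track.

import json

# Paths introduced by the patches above; the check itself is illustrative only.
ATTACK_TRACK_FILE = "attack-tracks/external-wl-with-cluster-takeover-roles.json"
CONTROL_FILE = "controls/C-0267-workloadwithclustertakeoverroles.json"

def attack_track_step_names(track):
    # Walk the track's data node and collect every step name, including nested subSteps.
    names = set()
    stack = [track["spec"]["data"]]
    while stack:
        node = stack.pop()
        names.add(node["name"])
        stack.extend(node.get("subSteps", []))
    return names

with open(ATTACK_TRACK_FILE) as f:
    track = json.load(f)
with open(CONTROL_FILE) as f:
    control = json.load(f)

track_name = track["metadata"]["name"]
steps = attack_track_step_names(track)

# A control can only be attached to a track if its categories exist as step names in that track.
for entry in control["attributes"].get("attackTracks", []):
    if entry["attackTrack"] != track_name:
        continue
    missing = [c for c in entry["categories"] if c not in steps]
    if missing:
        raise SystemExit(f"{CONTROL_FILE}: categories {missing} not defined in attack track '{track_name}'")

print(f"{CONTROL_FILE} is consistent with attack track '{track_name}'")
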
releaseDev/cis-v1.23-t1.0.1.json | 8583 +++++++++++ releaseDev/clusterscan.json | 1812 +++ releaseDev/controls.json | 7132 ++++++++++ releaseDev/default_config_inputs.json | 145 + releaseDev/devopsbest.json | 1107 ++ releaseDev/exceptions.json | 7820 ++++++++++ releaseDev/frameworks.json | 11764 ++++++++++++++++ releaseDev/mitre.json | 2112 +++ releaseDev/nsa.json | 2096 +++ releaseDev/rules.json | 8856 ++++++++++++ releaseDev/security.json | 3407 +++++ releaseDev/security_frameworks.json | 2569 ++++ releaseDev/soc2.json | 537 + releaseDev/workloadscan.json | 2021 +++ 25 files changed, 77708 insertions(+), 424 deletions(-) create mode 100644 releaseDev/ControlID_RuleName.csv create mode 100644 releaseDev/FWName_CID_CName.csv create mode 100644 releaseDev/allcontrols.json create mode 100644 releaseDev/armobest.json create mode 100644 releaseDev/attack_tracks.json create mode 100644 releaseDev/cis-aks-t1.2.0.json create mode 100644 releaseDev/cis-eks-t1.2.0.json create mode 100644 releaseDev/cis-v1.23-t1.0.1.json create mode 100644 releaseDev/clusterscan.json create mode 100644 releaseDev/controls.json create mode 100644 releaseDev/default_config_inputs.json create mode 100644 releaseDev/devopsbest.json create mode 100644 releaseDev/exceptions.json create mode 100644 releaseDev/frameworks.json create mode 100644 releaseDev/mitre.json create mode 100644 releaseDev/nsa.json create mode 100644 releaseDev/rules.json create mode 100644 releaseDev/security.json create mode 100644 releaseDev/security_frameworks.json create mode 100644 releaseDev/soc2.json create mode 100644 releaseDev/workloadscan.json diff --git a/controls/C-0267-workloadwithclustertakeoverroles.json b/controls/C-0267-workloadwithclustertakeoverroles.json index c9dc96379..e048787a9 100644 --- a/controls/C-0267-workloadwithclustertakeoverroles.json +++ b/controls/C-0267-workloadwithclustertakeoverroles.json @@ -9,7 +9,9 @@ "attackTrack": "external-workload-with-cluster-takeover-roles", "categories": [ "Cluster Access" - ] + ], + "displayRelatedResources": true, + "clickableResourceKind": "ServiceAccount" } ] }, diff --git a/gitregostore/gitstoremethods.go b/gitregostore/gitstoremethods.go index e6a83bde3..33ae79512 100644 --- a/gitregostore/gitstoremethods.go +++ b/gitregostore/gitstoremethods.go @@ -225,6 +225,20 @@ func (gs *GitRegoStore) GetOPAAttackTrackControls() ([]opapolicy.Control, error) return attackTrackControlsList, nil } +func (gs *GitRegoStore) GetAttackTrackCategoriesByControlIDAndAttackTrackName(controlID string, attackTrackName string) (opapolicy.AttackTrackCategories, error) { + control, err := gs.GetOPAControlByID(controlID) + if err != nil { + return opapolicy.AttackTrackCategories{}, fmt.Errorf("in GetAttackTrackCategoriesByControlIDAndAttackTrackName: error getting control: %s. 
error: %w", controlID, err) + } + categories := control.GetAllAttackTrackCategories() + for _, category := range categories { + if category.AttackTrack == attackTrackName { + return category, nil + } + } + return opapolicy.AttackTrackCategories{}, fmt.Errorf("attack track category '%s' not found in control '%s'", attackTrackName, controlID) +} + func (gs *GitRegoStore) GetOPAControlsNamesList() ([]string, error) { gs.controlsLock.RLock() defer gs.controlsLock.RUnlock() diff --git a/go.mod b/go.mod index 68c59d1c3..9526dcc00 100644 --- a/go.mod +++ b/go.mod @@ -1,20 +1,20 @@ module github.com/kubescape/regolibrary -go 1.19 +go 1.21 require ( - github.com/armosec/armoapi-go v0.0.256 + github.com/armosec/armoapi-go v0.0.330 github.com/go-gota/gota v0.12.0 - github.com/kubescape/opa-utils v0.0.272 + github.com/kubescape/opa-utils v0.0.279-0.20240306142553-f6c8e3e85e5b github.com/stretchr/testify v1.8.4 - go.uber.org/zap v1.24.0 + go.uber.org/zap v1.27.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b ) require ( - cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/container v1.24.0 // indirect + cloud.google.com/go/container v1.27.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect @@ -25,8 +25,8 @@ require ( github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect github.com/armosec/gojay v1.2.15 // indirect - github.com/armosec/utils-go v0.0.20 // indirect - github.com/armosec/utils-k8s-go v0.0.16 // indirect + github.com/armosec/utils-go v0.0.57 // indirect + github.com/armosec/utils-k8s-go v0.0.26 // indirect github.com/aws/aws-sdk-go-v2 v1.19.1 // indirect github.com/aws/aws-sdk-go-v2/config v1.18.30 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.13.29 // indirect @@ -43,118 +43,126 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 // indirect github.com/aws/smithy-go v1.13.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/briandowns/spinner v1.23.0 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/docker v24.0.5+incompatible // indirect - github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/docker/docker v25.0.1+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fatih/color v1.15.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/ghodss/yaml v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gobwas/glob v0.2.3 // 
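
For readers skimming the Go diff above: the new GetAttackTrackCategoriesByControlIDAndAttackTrackName helper fetches a control and returns the categories entry matching the requested attack track, erroring if none exists. The Python fragment below mirrors that lookup over a control's attackTracks attribute purely to illustrate the intended behaviour; it is not the implementation, and the inline data is a trimmed copy of the C-0267 control shown earlier.

# Trimmed copy of the attackTracks attribute from controls/C-0267-workloadwithclustertakeoverroles.json.
control_attack_tracks = [
    {"attackTrack": "external-workload-with-cluster-takeover-roles", "categories": ["Cluster Access"]},
]

def categories_for(attack_track_name, entries):
    # Return the categories entry for one attack track, or raise if the control defines none for it.
    for entry in entries:
        if entry["attackTrack"] == attack_track_name:
            return entry
    raise LookupError(f"attack track category '{attack_track_name}' not found in control")

print(categories_for("external-workload-with-cluster-takeover-roles", control_attack_tracks))
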
indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.4 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.11.0 // indirect - github.com/gorilla/mux v1.8.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525 // indirect - github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847 // indirect + github.com/kubescape/go-logger v0.0.22 // indirect + github.com/kubescape/k8s-interface v0.0.161 // indirect github.com/kubescape/rbac-utils v0.0.20 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/magiconair/properties v1.8.1 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/open-policy-agent/opa v0.55.0 // indirect + github.com/olvrng/ujson v1.1.0 // indirect + github.com/open-policy-agent/opa v0.61.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc4 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/cachecontrol v0.2.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect github.com/sirupsen/logrus 
v1.9.3 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.3.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.7.0 // indirect github.com/stripe/stripe-go/v74 v74.28.0 // indirect + github.com/subosito/gotenv v1.2.0 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2 // indirect github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2 // indirect - github.com/uptrace/uptrace-go v1.16.0 // indirect + github.com/uptrace/uptrace-go v1.18.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yashtewari/glob-intersection v0.2.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0 // indirect - go.opentelemetry.io/otel v1.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/otel/sdk v1.16.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.39.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/atomic v1.11.0 // indirect + go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/sdk v1.21.0 // indirect + go.opentelemetry.io/otel/sdk/metric v0.41.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/exp v0.0.0-20230728194245-b0cb94b80691 // indirect - golang.org/x/net v0.12.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/oauth2 v0.14.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect gonum.org/v1/gonum v0.9.1 // indirect - google.golang.org/api v0.126.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto 
v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/grpc v1.56.2 // indirect + google.golang.org/api v0.149.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/grpc v1.61.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.51.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.27.4 // indirect - k8s.io/apimachinery v0.27.4 // indirect - k8s.io/client-go v0.27.4 // indirect - k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/api v0.29.2 // indirect + k8s.io/apimachinery v0.29.2 // indirect + k8s.io/client-go v0.29.2 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/controller-runtime v0.15.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 9d5fc8a1e..b69e8831b 100644 --- a/go.sum +++ b/go.sum @@ -7,38 +7,19 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod 
h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/container v1.24.0 h1:N51t/cgQJFqDD/W7Mb+IvmAPHrf8AbPx7Bb7aF4lROE= -cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.27.1 h1:ZfLRiFM9ddFE92SlA28rknI6YJMz5Z5huAQK+FKWxIQ= +cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= @@ -59,9 +40,12 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthoriza github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 h1:1u/K2BFv0MwkG6he8RYuUcbbeK22rkoZbg4lKa/msZU= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0/go.mod h1:U5gpsREQZE6SLk1t/cFfc1eMhYAlYpEzvaYXuDfefy8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod 
h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -70,18 +54,22 @@ github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdII github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/armosec/armoapi-go v0.0.256 h1:eV8WWQ1r+2D0KHhLA6ux6lx67+uqkYe/uVHrOUFqz5c= -github.com/armosec/armoapi-go v0.0.256/go.mod h1:CJT5iH5VF30zjdQYXaQhsAm8IEHtM1T87HcFVXeLX54= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armosec/armoapi-go v0.0.330 h1:kvyLshJ3VBqWxDO/hvlpVU1DNsrmkb5M0oStw+Uwxb8= +github.com/armosec/armoapi-go v0.0.330/go.mod h1:6VYIw1hoNU3dTXKckMHNHhzhhPTMXDHtv5AFxvG4Q+U= github.com/armosec/gojay v1.2.15 h1:sSB2vnAvacUNkw9nzUYZKcPzhJOyk6/5LK2JCNdmoZY= github.com/armosec/gojay v1.2.15/go.mod h1:vzVAaay2TWJAngOpxu8aqLbye9jMgoKleuAOK+xsOts= -github.com/armosec/utils-go v0.0.20 h1:bvr+TMumEYdMsGFGSsaQysST7K02nNROFvuajNuKPlw= -github.com/armosec/utils-go v0.0.20/go.mod h1:ZEFiSv8KpTFNT19jHis1IengiF/BGDvg7tHmXo+cwxs= -github.com/armosec/utils-k8s-go v0.0.16 h1:h46PoxAb4OHA2p719PzcAS03lADw4lH4TyRMaZ3ix/g= -github.com/armosec/utils-k8s-go v0.0.16/go.mod h1:QX0QAGlH7KCZq810eO9QjTYqkhjw8cvrr96TZfaUGrk= +github.com/armosec/utils-go v0.0.57 h1:0RaqexK+t7HeKWfldBv2C1JiLLGuUx9FP0DGWDNRJpg= +github.com/armosec/utils-go v0.0.57/go.mod h1:4wfINE8JTQ6EHvSL2jki0Q3/D1j6oDi6sxxrtAEug74= +github.com/armosec/utils-k8s-go v0.0.26 h1:gVSV1mrALyphaesc+JXbx9SfbxLqfgg1KvvC1/0Hfkk= +github.com/armosec/utils-k8s-go v0.0.26/go.mod h1:WL2brx3tszxeSl1yHac0oAVJUg3o22HYh1dPjaSfjXU= github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.19.1 h1:STs0lbbpXu3byTPcnRLghs2DH0yk9qKDo27TyyJSKsM= github.com/aws/aws-sdk-go-v2 v1.19.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= @@ -115,76 +103,82 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 h1:U7h9CPoyMfVoN5jUglB0LglCMP10 github.com/aws/aws-sdk-go-v2/service/sts v1.20.1/go.mod 
h1:BUHusg4cOA1TFGegj7x8/eoWrbdHzJfoMrXcbMQAG0k= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= +github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= +github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go 
v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= -github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v25.0.1+incompatible h1:k5TYd5rIVQRSqcTwCID+cyVA0yRg86+Pcrz1ls0/frA= +github.com/docker/docker v25.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= 
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -193,40 +187,44 @@ github.com/go-fonts/latin-modern v0.2.0/go.mod 
h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3 github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gota/gota v0.12.0 h1:T5BDg1hTf5fZ/CO+T/N0E+DDqUhvoKBl+UVckgcAAQg= github.com/go-gota/gota v0.12.0/go.mod h1:UT+NsWpZC/FhaOyWb9Hui0jXg0Iq8e/YugZHTbyW/34= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf 
v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -234,88 +232,96 @@ github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200j github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf 
v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -323,51 +329,72 
@@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525 h1:9wzR38LebiA58cGxRBnsF78k4eJGnk7UetoTPKkyz2A= -github.com/kubescape/go-logger v0.0.14-0.20230730134225-e59751254525/go.mod h1:Al+yTE+vemECb/Myn2G9+2o2uFmMtphbkQmxf4OEHxE= -github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847 
h1:GGuS6pE6KGa5q7j9fkRN3p1eQw16/jLUMnPR8FT3O6M= -github.com/kubescape/k8s-interface v0.0.135-0.20230730135750-e6e709507847/go.mod h1:eBd6few7RYplnNNlHoe6d7jMmoE6Kx1emapJ91euBbY= -github.com/kubescape/opa-utils v0.0.272 h1:hqEuYGf/B2HuqbdVUtSsUGJopfXbQOgl3+KvFAu2Gd8= -github.com/kubescape/opa-utils v0.0.272/go.mod h1:VmplJnkhei6mDna+6z183k/HX6GOPgsXiwIlDW8mhKw= +github.com/kubescape/go-logger v0.0.22 h1:gle7wH6emOiGv9ljdpVi82pWLQ3jGucrUucvil6JXHE= +github.com/kubescape/go-logger v0.0.22/go.mod h1:x3HBpZo3cMT/WIdy18BxvVVd5D0e/PWFVk/HiwBNu3g= +github.com/kubescape/k8s-interface v0.0.161 h1:v6b3/kmA4o/2niNrejrbXj5X9MLfH0UrpI3s+e/fdwc= +github.com/kubescape/k8s-interface v0.0.161/go.mod h1:oF+Yxug3Kpfu9Yr2j63wy7gwswrKXpiqI0mLk/7gF/s= +github.com/kubescape/opa-utils v0.0.279-0.20240306142553-f6c8e3e85e5b h1:lY9f5LfjD6cy+remULO//ey2FK+mVEJSIxjatuu+6kI= +github.com/kubescape/opa-utils v0.0.279-0.20240306142553-f6c8e3e85e5b/go.mod h1:N/UnbZHpoiHQH7O50yadhIXZvVl0IVtTGBmePPrSQSg= github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw= github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= +github.com/miekg/dns 
v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -378,48 +405,72 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= -github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= -github.com/open-policy-agent/opa v0.55.0 h1:s7Vm4ph6zDqqP/KzvUSw9fsKVsm9lhbTZhYGxxTK7mo= -github.com/open-policy-agent/opa v0.55.0/go.mod h1:2Vh8fj/bXCqSwGMbBiHGrw+O8yrho6T/fdaHt5ROmaQ= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olvrng/ujson v1.1.0 h1:8xVUzVlqwdMVWh5d1UHBtLQ1D50nxoPuPEq9Wozs8oA= +github.com/olvrng/ujson v1.1.0/go.mod h1:Mz4G3RODTUfbkKyvi0lgmPx/7vd3Saksk+1jgk8s9xo= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/open-policy-agent/opa v0.61.0 h1:nhncQ2CAYtQTV/SMBhDDPsCpCQsUW+zO/1j+T5V7oZg= +github.com/open-policy-agent/opa v0.61.0/go.mod h1:7OUuzJnsS9yHf8lw0ApfcbrnaRG1EkN3J2fuuqi4G/E= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= -github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod 
h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.42.0 
h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -443,21 +494,36 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1l github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -467,84 +533,86 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stripe/stripe-go/v74 v74.28.0 h1:ItzPPy+cjMKbR3Oihknt/8dv6PANp3hTThUGZjhF9lc= github.com/stripe/stripe-go/v74 v74.28.0/go.mod h1:f9L6LvaXa35ja7eyvP6GQswoaIPaBRvGAimAO+udbBw= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2 h1:CNznWHkrbA6o1q2H/BsH4tIHf4zbKNtndeoV+AH8z0U= github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.2/go.mod h1:7YSrHCmYPHIXjTWnKSU7EGT0TFEcm3WwSeQquwCGg38= github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2 h1:uyrW06oJi4iWvhjPLVfk4qrSP2Zm0AMozKKDmp6i4pE= github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.2/go.mod h1:PMAs2dNxP55lgt6xu0if+Jasm6s+Xpmqn6ev1NyDfnI= -github.com/uptrace/uptrace-go v1.16.0 h1:yB9vt1hBYYoXWExNx0okubLOjd339d7lH+/5o+Lp+MY= -github.com/uptrace/uptrace-go v1.16.0/go.mod h1:Ssc5wLpoL+9V0qkT5FtrIiru9SY4xb7q1UVLjSpxpCg= +github.com/uptrace/uptrace-go v1.18.0 h1:RY15qy19C0irbe2UCxQbjenk8WyUdvUV756R9ZpqCGI= +github.com/uptrace/uptrace-go v1.18.0/go.mod h1:BUW3sFgEyRmZIxts4cv6TGaJnWAW95uW78GIiSdChOQ= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= 
-go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0 h1:EbmAUG9hEAMXyfWEasIt2kmh/WmXUznUksChApTgBGc= -go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0/go.mod h1:rD9feqRYP24P14t5kmhNMqsqm1jvKmpx2H2rKVw52V8= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 h1:f6BwB2OACc3FCbYVznctQ9V6KK7Vq6CjmYXJ7DeSs4E= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0/go.mod h1:UqL5mZ3qs6XYhDnZaW1Ps4upD+PX6LipH40AoeuIlwU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0 h1:rm+Fizi7lTM2UefJ1TO347fSRcwmIsUAaZmYmIGBRAo= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0/go.mod h1:sWFbI3jJ+6JdjOVepA5blpv/TJ20Hw+26561iMbWcwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 h1:+XWJd3jf75RXJq29mxbuXhCXFDG3S3R4vBUeSI2P7tE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0/go.mod h1:hqgzBPTf4yONMFgdZvL/bK42R/iinTyVQtiWihs3SZc= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= -go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= -go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= -go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0 h1:TXu20nL4yYfJlQeqG/D3Ia6b0p2HZmLfJto9hqJTQ/c= +go.opentelemetry.io/contrib/instrumentation/runtime v0.44.0/go.mod h1:tQ5gBnfjndV1su3+DiLuu6rnd9hBBzg4rkRILnjSNFg= +go.opentelemetry.io/otel 
v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0 h1:k0k7hFNDd8K4iOMJXj7s8sHaC4mhTlAeppRmZXLgZ6k= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.41.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0 h1:HgbDTD8pioFdY3NRc/YCvsWjqQPtweGyXxa32LgnTOw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.41.0/go.mod h1:tmvt/yK5Es5d6lHYWerLSOna8lCEfrBVX/a9M0ggqss= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 h1:hSWWvDjXHVLq9DkmB+77fl8v7+t+yYiS+eNkiplDK54= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0/go.mod h1:zG7KQql1WjZCaUJd+L/ReSYx4bjbYJxg5ws9ws+mYes= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk/metric v0.41.0 h1:c3sAt9/pQ5fSIUfl0gPtClV3HhE18DCVzByD33R/zsk= +go.opentelemetry.io/otel/sdk/metric v0.41.0/go.mod h1:PmOmSt+iOklKtIg5O4Vz9H/ttcRFSNTgii+E1KGyn1w= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -554,13 +622,8 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230728194245-b0cb94b80691 h1:/yRP+0AN7mf5DkD3BAI6TOFnd51gEoDEb8o35jIFtgw= -golang.org/x/exp v0.0.0-20230728194245-b0cb94b80691/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -578,23 +641,22 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -604,42 +666,22 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= +golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -647,14 +689,18 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -663,42 +709,23 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -707,14 +734,14 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -725,6 +752,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -735,35 +763,13 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn 
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.9.1 
h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -784,29 +790,16 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= -google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -820,35 +813,13 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc 
h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -857,21 +828,10 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= +google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -880,35 +840,37 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.0 
h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -916,31 +878,27 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.4 h1:0pCo/AN9hONazBKlNUdhQymmnfLRbSZjd5H5H3f0bSs= -k8s.io/api v0.27.4/go.mod h1:O3smaaX15NfxjzILfiln1D8Z3+gEYpjEpiNA/1EVK1Y= -k8s.io/apimachinery v0.27.4 h1:CdxflD4AF61yewuid0fLl6bM4a3q04jWel0IlP+aYjs= -k8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= -k8s.io/client-go v0.27.4 h1:vj2YTtSJ6J4KxaC88P4pMPEQECWMY8gqPqsTgUKzvjk= -k8s.io/client-go v0.27.4/go.mod h1:ragcly7lUlN0SRPk5/ZkGnDjPknzb37TICq07WhI6Xc= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod 
h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/releaseDev/ControlID_RuleName.csv b/releaseDev/ControlID_RuleName.csv new file mode 100644 index 000000000..2b780bf71 --- /dev/null +++ b/releaseDev/ControlID_RuleName.csv @@ -0,0 +1,297 @@ +ControlID,RuleName +C-0105,ensure-that-the-admin.conf-file-ownership-is-set-to-root-root +C-0108,ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive +C-0209,list-all-namespaces +C-0106,ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive +C-0084,exposed-rce-pods +C-0012,rule-credentials-in-env-var +C-0012,rule-credentials-configmap +C-0207,rule-secrets-in-env-var +C-0270,resources-cpu-limits +C-0124,ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used +C-0216,psp-deny-hostnetwork +C-0129,ensure-that-the-api-server-profiling-argument-is-set-to-false +C-0111,ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive +C-0267,workload-with-cluster-takeover-roles +C-0160,k8s-audit-logs-enabled-native-cis +C-0199,pod-security-admission-baseline-applied-1 +C-0199,pod-security-admission-baseline-applied-2 +C-0226,alert-container-optimized-os-not-in-use +C-0145,ensure-that-the-controller-manager-profiling-argument-is-set-to-false +C-0167,ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root +C-0151,ensure-that-the-scheduler-profiling-argument-is-set-to-false +C-0057,rule-privilege-escalation +C-0015,rule-can-list-get-secrets-v1 +C-0159,etcd-unique-ca +C-0134,ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate +C-0112,ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600 +C-0152,ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1 +C-0122,ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set +C-0004,resources-memory-limit-and-request +C-0204,pod-security-admission-baseline-applied-1 +C-0204,pod-security-admission-baseline-applied-2 +C-0070,enforce-kubelet-client-tls-authentication-updated +C-0102,ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive +C-0021,exposed-sensitive-interfaces-v1 +C-0103,ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd +C-0163,ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root +C-0002,exec-into-container-v1 +C-0213,psp-deny-privileged-container +C-0255,workload-mounted-secrets +C-0113,ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false +C-0240,rule-cni-enabled-aks +C-0153,etcd-tls-enabled +C-0030,ingress-and-egress-blocked 
+C-0063,rule-can-portforward-v1 +C-0059,nginx-ingress-snippet-annotation-vulnerability +C-0269,resources-memory-requests +C-0254,rule-manual +C-0197,pod-security-admission-restricted-applied-1 +C-0197,pod-security-admission-restricted-applied-2 +C-0133,ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate +C-0036,list-all-validating-webhooks +C-0248,ensure-clusters-are-created-with-private-nodes +C-0061,pods-in-default-namespace +C-0196,pod-security-admission-baseline-applied-1 +C-0196,pod-security-admission-baseline-applied-2 +C-0045,alert-rw-hostpath +C-0180,kubelet-event-qps +C-0217,psp-deny-allowprivilegeescalation +C-0046,insecure-capabilities +C-0130,ensure-that-the-api-server-audit-log-path-argument-is-set +C-0127,ensure-that-the-admission-control-plugin-NodeRestriction-is-set +C-0210,set-seccomp-profile-RuntimeDefault +C-0176,kubelet-streaming-connection-idle-timeout +C-0110,ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root +C-0089,CVE-2022-3172 +C-0273,outdated-k8s-version +C-0037,rule-can-update-configmap-v1 +C-0078,container-image-repository +C-0078,container-image-repository-v1 +C-0225,ensure-default-service-accounts-has-only-default-roles +C-0225,automount-default-service-account +C-0123,ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set +C-0227,ensure-endpointprivateaccess-is-enabled +C-0198,pod-security-admission-restricted-applied-1 +C-0198,pod-security-admission-restricted-applied-2 +C-0119,ensure-that-the-api-server-authorization-mode-argument-includes-Node +C-0251,list-role-definitions-in-acr +C-0044,container-hostPort +C-0238,Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive +C-0118,ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow +C-0233,alert-fargate-not-in-use +C-0052,instance-metadata-api-access +C-0035,rule-list-all-cluster-admins-v1 +C-0136,ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate +C-0115,ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set +C-0258,workload-mounted-configmap +C-0245,encrypt-traffic-to-https-load-balancers-with-tls-certificates +C-0235,ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive +C-0144,ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate +C-0107,ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root +C-0212,pods-in-default-namespace +C-0212,rolebinding-in-default-namespace +C-0212,role-in-default-namespace +C-0212,configmap-in-default-namespace +C-0212,endpoints-in-default-namespace +C-0212,persistentvolumeclaim-in-default-namespace +C-0212,podtemplate-in-default-namespace +C-0212,replicationcontroller-in-default-namespace +C-0212,service-in-default-namespace +C-0212,serviceaccount-in-default-namespace +C-0212,endpointslice-in-default-namespace +C-0212,horizontalpodautoscaler-in-default-namespace +C-0212,lease-in-default-namespace +C-0212,csistoragecapacity-in-default-namespace +C-0212,ingress-in-default-namespace +C-0212,poddisruptionbudget-in-default-namespace +C-0212,resources-secret-in-default-namespace +C-0001,rule-identify-blocklisted-image-registries +C-0001,rule-identify-blocklisted-image-registries-v1 +C-0262,anonymous-access-enabled +C-0214,psp-deny-hostpid +C-0128,ensure-that-the-api-server-secure-port-argument-is-not-set-to-0 +C-0093,ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root +C-0073,naked-pods 
+C-0104,ensure-that-the-admin.conf-file-permissions-are-set-to-600 +C-0069,anonymous-requests-to-kubelet-service-updated +C-0189,automount-default-service-account +C-0189,namespace-without-service-account +C-0109,ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root +C-0229,ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks +C-0188,rule-can-create-pod +C-0173,kubelet-authorization-mode-alwaysAllow +C-0141,ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate +C-0161,audit-policy-content +C-0234,ensure-external-secrets-storage-is-in-use +C-0100,ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive +C-0203,pod-security-admission-baseline-applied-1 +C-0203,pod-security-admission-baseline-applied-2 +C-0157,etcd-peer-client-auth-cert +C-0094,ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive +C-0181,validate-kubelet-tls-configuration-updated +C-0183,kubelet-rotate-kubelet-server-certificate +C-0168,ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive +C-0193,pod-security-admission-baseline-applied-1 +C-0193,pod-security-admission-baseline-applied-2 +C-0016,rule-allow-privilege-escalation +C-0087,CVE-2022-23648 +C-0256,exposure-to-internet +C-0099,ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root +C-0241,ensure-azure-rbac-is-set +C-0026,rule-deny-cronjobs +C-0165,if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root +C-0121,ensure-that-the-admission-control-plugin-EventRateLimit-is-set +C-0095,ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root +C-0055,linux-hardening +C-0182,kubelet-rotate-certificates +C-0075,image-pull-policy-is-not-set-to-always +C-0220,psp-required-drop-capabilities +C-0155,etcd-auto-tls-disabled +C-0260,ensure_network_policy_configured_in_labels +C-0263,ingress-no-tls +C-0162,ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive +C-0228,ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks +C-0201,pod-security-admission-restricted-applied-1 +C-0201,pod-security-admission-restricted-applied-2 +C-0247,restrict-access-to-the-control-plane-endpoint +C-0131,ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate +C-0042,rule-can-ssh-to-pod-v1 +C-0184,kubelet-strong-cryptographics-ciphers +C-0211,rule-privilege-escalation +C-0211,immutable-container-filesystem +C-0211,non-root-containers +C-0211,drop-capability-netraw +C-0211,set-seLinuxOptions +C-0211,set-seccomp-profile +C-0211,set-procmount-default +C-0211,set-fsgroup-value +C-0211,set-fsgroupchangepolicy-value +C-0211,set-sysctls-params +C-0211,set-supplementalgroups-values +C-0211,rule-allow-privilege-escalation +C-0067,k8s-audit-logs-enabled-cloud +C-0067,k8s-audit-logs-enabled-native +C-0066,secret-etcd-encryption-cloud +C-0066,etcd-encryption-native +C-0265,system-authenticated-allowed-to-take-over-cluster +C-0244,secret-etcd-encryption-cloud +C-0005,insecure-port-flag +C-0179,kubelet-hostname-override +C-0140,ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate +C-0007,rule-excessive-delete-rights-v1 +C-0221,ensure-image-scanning-enabled-cloud +C-0222,ensure-aws-policies-are-present +C-0147,ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate +C-0090,CVE-2022-39328 
+C-0195,pod-security-admission-baseline-applied-1 +C-0195,pod-security-admission-baseline-applied-2 +C-0120,ensure-that-the-api-server-authorization-mode-argument-includes-RBAC +C-0191,rule-can-bind-escalate +C-0191,rule-can-impersonate-users-groups-v1 +C-0205,ensure-that-the-cni-in-use-supports-network-policies +C-0232,review-roles-with-aws-iam-authenticator +C-0246,rule-manual +C-0041,host-network-access +C-0208,external-secret-storage +C-0017,immutable-container-filesystem +C-0185,cluster-admin-role +C-0172,anonymous-requests-to-kubelet-service-updated +C-0218,psp-deny-root-container +C-0077,k8s-common-labels-usage +C-0200,pod-security-admission-restricted-applied-1 +C-0200,pod-security-admission-restricted-applied-2 +C-0268,resources-cpu-requests +C-0174,enforce-kubelet-client-tls-authentication-updated +C-0242,rule-hostile-multitenant-workloads +C-0261,serviceaccount-token-mount +C-0098,ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive +C-0135,ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true +C-0171,ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root +C-0170,if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive +C-0175,read-only-port-enabled-updated +C-0062,sudo-in-container-entrypoint +C-0034,automount-service-account +C-0154,etcd-client-auth-cert +C-0101,ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root +C-0009,resource-policies +C-0117,ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate +C-0206,internal-networking +C-0252,ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled +C-0137,ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate +C-0178,kubelet-ip-tables +C-0166,ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive +C-0083,exposed-critical-pods +C-0146,ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true +C-0192,pod-security-admission-applied-1 +C-0192,pod-security-admission-applied-2 +C-0259,rule-credentials-in-env-var +C-0148,ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate +C-0239,ensure-default-service-accounts-has-only-default-roles +C-0236,verify-image-signature +C-0215,psp-deny-hostipc +C-0088,rbac-enabled-cloud +C-0088,rbac-enabled-native +C-0085,excessive_amount_of_vulnerabilities_pods +C-0243,ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider +C-0096,ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive +C-0257,workload-mounted-pvc +C-0194,pod-security-admission-baseline-applied-1 +C-0194,pod-security-admission-baseline-applied-2 +C-0190,automount-service-account +C-0038,host-pid-ipc-privileges +C-0116,ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate +C-0039,list-all-mutating-webhooks +C-0219,psp-deny-allowed-capabilities +C-0125,ensure-that-the-admission-control-plugin-ServiceAccount-is-set +C-0054,internal-networking +C-0272,workload-with-administrative-roles +C-0249,rule-manual +C-0048,alert-any-hostpath +C-0114,ensure-that-the-api-server-token-auth-file-parameter-is-not-set +C-0068,psp-enabled-cloud +C-0068,psp-enabled-native +C-0143,ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers +C-0081,CVE-2022-24348 
+C-0079,CVE-2022-0185 +C-0250,ensure-service-principle-has-read-only-permissions +C-0091,CVE-2022-47633 +C-0018,configured-readiness-probe +C-0074,containers-mounting-docker-socket +C-0138,ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate +C-0164,if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive +C-0149,ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true +C-0177,kubelet-protect-kernel-defaults +C-0150,ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1 +C-0020,alert-mount-potential-credentials-paths +C-0014,rule-access-dashboard-subject-v1 +C-0014,rule-access-dashboard-wl-v1 +C-0186,rule-can-list-get-secrets-v1 +C-0264,pv-without-encryption +C-0187,rule-list-all-cluster-admins-v1 +C-0013,non-root-containers +C-0202,pod-security-admission-baseline-applied-1 +C-0202,pod-security-admission-baseline-applied-2 +C-0058,Symlink-Exchange-Can-Allow-Host-Filesystem-Access +C-0076,label-usage-for-resources +C-0050,resources-cpu-limit-and-request +C-0142,ensure-that-the-api-server-encryption-providers-are-appropriately-configured +C-0237,has-image-signature +C-0169,ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root +C-0156,etcd-peer-tls-enabled +C-0223,ensure_nodeinstancerole_has_right_permissions_for_ecr +C-0231,ensure-https-loadbalancers-encrypted-with-tls-aws +C-0132,ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate +C-0158,etcd-peer-auto-tls-disabled +C-0253,rule-identify-old-k8s-registry +C-0053,access-container-service-account-v1 +C-0056,configured-liveness-probe +C-0097,ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root +C-0230,ensure-network-policy-is-enabled-eks +C-0092,ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive +C-0049,internal-networking +C-0065,rule-can-impersonate-users-groups-v1 +C-0126,ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set +C-0031,rule-can-delete-k8s-events-v1 +C-0271,resources-memory-limits +C-0139,ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate diff --git a/releaseDev/FWName_CID_CName.csv b/releaseDev/FWName_CID_CName.csv new file mode 100644 index 000000000..466904a6e --- /dev/null +++ b/releaseDev/FWName_CID_CName.csv @@ -0,0 +1,475 @@ +frameworkName,ControlID,ControlName +DevOpsBest,C-0018,Configured readiness probe +DevOpsBest,C-0044,Container hostPort +DevOpsBest,C-0056,Configured liveness probe +DevOpsBest,C-0061,Pods in default namespace +DevOpsBest,C-0073,Naked pods +DevOpsBest,C-0074,Container runtime socket mounted +DevOpsBest,C-0075,Image pull policy on latest tag +DevOpsBest,C-0076,Label usage for resources +DevOpsBest,C-0077,K8s common labels usage +DevOpsBest,C-0253,Deprecated Kubernetes image registry +DevOpsBest,C-0268,Ensure CPU requests are set +DevOpsBest,C-0269,Ensure memory requests are set +DevOpsBest,C-0270,Ensure CPU limits are set +DevOpsBest,C-0271,Ensure memory limits are set +AllControls,C-0002,Prevent containers from allowing command execution +AllControls,C-0005,API server insecure port is enabled +AllControls,C-0007,Roles with delete capabilities +AllControls,C-0012,Applications credentials in configuration files +AllControls,C-0013,Non-root containers +AllControls,C-0014,Access Kubernetes dashboard +AllControls,C-0015,List Kubernetes secrets +AllControls,C-0016,Allow privilege escalation 
+AllControls,C-0017,Immutable container filesystem +AllControls,C-0018,Configured readiness probe +AllControls,C-0020,Mount service principal +AllControls,C-0021,Exposed sensitive interfaces +AllControls,C-0026,Kubernetes CronJob +AllControls,C-0030,Ingress and Egress blocked +AllControls,C-0031,Delete Kubernetes events +AllControls,C-0034,Automatic mapping of service account +AllControls,C-0035,Administrative Roles +AllControls,C-0036,Validate admission controller (validating) +AllControls,C-0038,Host PID/IPC privileges +AllControls,C-0039,Validate admission controller (mutating) +AllControls,C-0041,HostNetwork access +AllControls,C-0042,SSH server running inside container +AllControls,C-0044,Container hostPort +AllControls,C-0045,Writable hostPath mount +AllControls,C-0046,Insecure capabilities +AllControls,C-0048,HostPath mount +AllControls,C-0049,Network mapping +AllControls,C-0052,Instance Metadata API +AllControls,C-0053,Access container service account +AllControls,C-0054,Cluster internal networking +AllControls,C-0055,Linux hardening +AllControls,C-0056,Configured liveness probe +AllControls,C-0057,Privileged container +AllControls,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. +AllControls,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability +AllControls,C-0061,Pods in default namespace +AllControls,C-0062,Sudo in container entrypoint +AllControls,C-0063,Portforwarding privileges +AllControls,C-0065,No impersonation +AllControls,C-0066,Secret/etcd encryption enabled +AllControls,C-0067,Audit logs enabled +AllControls,C-0068,PSP enabled +AllControls,C-0069,Disable anonymous access to Kubelet service +AllControls,C-0070,Enforce Kubelet client TLS authentication +AllControls,C-0073,Naked pods +AllControls,C-0074,Container runtime socket mounted +AllControls,C-0075,Image pull policy on latest tag +AllControls,C-0076,Label usage for resources +AllControls,C-0077,K8s common labels usage +AllControls,C-0078,Images from allowed registry +AllControls,C-0079,CVE-2022-0185-linux-kernel-container-escape +AllControls,C-0081,CVE-2022-24348-argocddirtraversal +AllControls,C-0087,CVE-2022-23648-containerd-fs-escape +AllControls,C-0088,RBAC enabled +AllControls,C-0090,CVE-2022-39328-grafana-auth-bypass +AllControls,C-0091,CVE-2022-47633-kyverno-signature-bypass +AllControls,C-0262,Anonymous user has RoleBinding +AllControls,C-0265,system:authenticated user has elevated roles +AllControls,C-0270,Ensure CPU limits are set +AllControls,C-0271,Ensure memory limits are set +cis-v1.23-t1.0.1,C-0092,Ensure that the API server pod specification file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0093,Ensure that the API server pod specification file ownership is set to root:root +cis-v1.23-t1.0.1,C-0094,Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0095,Ensure that the controller manager pod specification file ownership is set to root:root +cis-v1.23-t1.0.1,C-0096,Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0097,Ensure that the scheduler pod specification file ownership is set to root:root +cis-v1.23-t1.0.1,C-0098,Ensure that the etcd pod specification file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0099,Ensure that the etcd pod specification file ownership is set to root:root +cis-v1.23-t1.0.1,C-0100,Ensure that the Container Network Interface file permissions are set 
to 600 or more restrictive +cis-v1.23-t1.0.1,C-0101,Ensure that the Container Network Interface file ownership is set to root:root +cis-v1.23-t1.0.1,C-0102,Ensure that the etcd data directory permissions are set to 700 or more restrictive +cis-v1.23-t1.0.1,C-0103,Ensure that the etcd data directory ownership is set to etcd:etcd +cis-v1.23-t1.0.1,C-0104,Ensure that the admin.conf file permissions are set to 600 +cis-v1.23-t1.0.1,C-0105,Ensure that the admin.conf file ownership is set to root:root +cis-v1.23-t1.0.1,C-0106,Ensure that the scheduler.conf file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0107,Ensure that the scheduler.conf file ownership is set to root:root +cis-v1.23-t1.0.1,C-0108,Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0109,Ensure that the controller-manager.conf file ownership is set to root:root +cis-v1.23-t1.0.1,C-0110,Ensure that the Kubernetes PKI directory and file ownership is set to root:root +cis-v1.23-t1.0.1,C-0111,Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0112,Ensure that the Kubernetes PKI key file permissions are set to 600 +cis-v1.23-t1.0.1,C-0113,Ensure that the API Server --anonymous-auth argument is set to false +cis-v1.23-t1.0.1,C-0114,Ensure that the API Server --token-auth-file parameter is not set +cis-v1.23-t1.0.1,C-0115,Ensure that the API Server --DenyServiceExternalIPs is not set +cis-v1.23-t1.0.1,C-0116,Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate +cis-v1.23-t1.0.1,C-0117,Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate +cis-v1.23-t1.0.1,C-0118,Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow +cis-v1.23-t1.0.1,C-0119,Ensure that the API Server --authorization-mode argument includes Node +cis-v1.23-t1.0.1,C-0120,Ensure that the API Server --authorization-mode argument includes RBAC +cis-v1.23-t1.0.1,C-0121,Ensure that the admission control plugin EventRateLimit is set +cis-v1.23-t1.0.1,C-0122,Ensure that the admission control plugin AlwaysAdmit is not set +cis-v1.23-t1.0.1,C-0123,Ensure that the admission control plugin AlwaysPullImages is set +cis-v1.23-t1.0.1,C-0124,Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used +cis-v1.23-t1.0.1,C-0125,Ensure that the admission control plugin ServiceAccount is set +cis-v1.23-t1.0.1,C-0126,Ensure that the admission control plugin NamespaceLifecycle is set +cis-v1.23-t1.0.1,C-0127,Ensure that the admission control plugin NodeRestriction is set +cis-v1.23-t1.0.1,C-0128,Ensure that the API Server --secure-port argument is not set to 0 +cis-v1.23-t1.0.1,C-0129,Ensure that the API Server --profiling argument is set to false +cis-v1.23-t1.0.1,C-0130,Ensure that the API Server --audit-log-path argument is set +cis-v1.23-t1.0.1,C-0131,Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate +cis-v1.23-t1.0.1,C-0132,Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate +cis-v1.23-t1.0.1,C-0133,Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate +cis-v1.23-t1.0.1,C-0134,Ensure that the API Server --request-timeout argument is set as appropriate +cis-v1.23-t1.0.1,C-0135,Ensure that the API Server --service-account-lookup argument is set to true 
+cis-v1.23-t1.0.1,C-0136,Ensure that the API Server --service-account-key-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0137,Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate +cis-v1.23-t1.0.1,C-0138,Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate +cis-v1.23-t1.0.1,C-0139,Ensure that the API Server --client-ca-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0140,Ensure that the API Server --etcd-cafile argument is set as appropriate +cis-v1.23-t1.0.1,C-0141,Ensure that the API Server --encryption-provider-config argument is set as appropriate +cis-v1.23-t1.0.1,C-0142,Ensure that encryption providers are appropriately configured +cis-v1.23-t1.0.1,C-0143,Ensure that the API Server only makes use of Strong Cryptographic Ciphers +cis-v1.23-t1.0.1,C-0144,Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate +cis-v1.23-t1.0.1,C-0145,Ensure that the Controller Manager --profiling argument is set to false +cis-v1.23-t1.0.1,C-0146,Ensure that the Controller Manager --use-service-account-credentials argument is set to true +cis-v1.23-t1.0.1,C-0147,Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0148,Ensure that the Controller Manager --root-ca-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0149,Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true +cis-v1.23-t1.0.1,C-0150,Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1 +cis-v1.23-t1.0.1,C-0151,Ensure that the Scheduler --profiling argument is set to false +cis-v1.23-t1.0.1,C-0152,Ensure that the Scheduler --bind-address argument is set to 127.0.0.1 +cis-v1.23-t1.0.1,C-0153,Ensure that the --cert-file and --key-file arguments are set as appropriate +cis-v1.23-t1.0.1,C-0154,Ensure that the --client-cert-auth argument is set to true +cis-v1.23-t1.0.1,C-0155,Ensure that the --auto-tls argument is not set to true +cis-v1.23-t1.0.1,C-0156,Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate +cis-v1.23-t1.0.1,C-0157,Ensure that the --peer-client-cert-auth argument is set to true +cis-v1.23-t1.0.1,C-0158,Ensure that the --peer-auto-tls argument is not set to true +cis-v1.23-t1.0.1,C-0159,Ensure that a unique Certificate Authority is used for etcd +cis-v1.23-t1.0.1,C-0160,Ensure that a minimal audit policy is created +cis-v1.23-t1.0.1,C-0161,Ensure that the audit policy covers key security concerns +cis-v1.23-t1.0.1,C-0162,Ensure that the kubelet service file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0163,Ensure that the kubelet service file ownership is set to root:root +cis-v1.23-t1.0.1,C-0164,If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0165,If proxy kubeconfig file exists ensure ownership is set to root:root +cis-v1.23-t1.0.1,C-0166,Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root +cis-v1.23-t1.0.1,C-0168,Ensure that the certificate authorities file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0169,Ensure that the client certificate authorities file ownership is set to root:root +cis-v1.23-t1.0.1,C-0170,If the kubelet config.yaml configuration file is being used 
validate permissions set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root +cis-v1.23-t1.0.1,C-0172,Ensure that the --anonymous-auth argument is set to false +cis-v1.23-t1.0.1,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow +cis-v1.23-t1.0.1,C-0174,Ensure that the --client-ca-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0175,Verify that the --read-only-port argument is set to 0 +cis-v1.23-t1.0.1,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0 +cis-v1.23-t1.0.1,C-0177,Ensure that the --protect-kernel-defaults argument is set to true +cis-v1.23-t1.0.1,C-0178,Ensure that the --make-iptables-util-chains argument is set to true +cis-v1.23-t1.0.1,C-0179,Ensure that the --hostname-override argument is not set +cis-v1.23-t1.0.1,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture +cis-v1.23-t1.0.1,C-0181,Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate +cis-v1.23-t1.0.1,C-0182,Ensure that the --rotate-certificates argument is not set to false +cis-v1.23-t1.0.1,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true +cis-v1.23-t1.0.1,C-0184,Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers +cis-v1.23-t1.0.1,C-0185,Ensure that the cluster-admin role is only used where required +cis-v1.23-t1.0.1,C-0186,Minimize access to secrets +cis-v1.23-t1.0.1,C-0187,Minimize wildcard use in Roles and ClusterRoles +cis-v1.23-t1.0.1,C-0188,Minimize access to create pods +cis-v1.23-t1.0.1,C-0189,Ensure that default service accounts are not actively used +cis-v1.23-t1.0.1,C-0190,Ensure that Service Account Tokens are only mounted where necessary +cis-v1.23-t1.0.1,C-0191,"Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster" +cis-v1.23-t1.0.1,C-0192,Ensure that the cluster has at least one active policy control mechanism in place +cis-v1.23-t1.0.1,C-0193,Minimize the admission of privileged containers +cis-v1.23-t1.0.1,C-0194,Minimize the admission of containers wishing to share the host process ID namespace +cis-v1.23-t1.0.1,C-0195,Minimize the admission of containers wishing to share the host IPC namespace +cis-v1.23-t1.0.1,C-0196,Minimize the admission of containers wishing to share the host network namespace +cis-v1.23-t1.0.1,C-0197,Minimize the admission of containers with allowPrivilegeEscalation +cis-v1.23-t1.0.1,C-0198,Minimize the admission of root containers +cis-v1.23-t1.0.1,C-0199,Minimize the admission of containers with the NET_RAW capability +cis-v1.23-t1.0.1,C-0200,Minimize the admission of containers with added capabilities +cis-v1.23-t1.0.1,C-0201,Minimize the admission of containers with capabilities assigned +cis-v1.23-t1.0.1,C-0202,Minimize the admission of Windows HostProcess Containers +cis-v1.23-t1.0.1,C-0203,Minimize the admission of HostPath volumes +cis-v1.23-t1.0.1,C-0204,Minimize the admission of containers which use HostPorts +cis-v1.23-t1.0.1,C-0205,Ensure that the CNI in use supports Network Policies +cis-v1.23-t1.0.1,C-0206,Ensure that all Namespaces have Network Policies defined +cis-v1.23-t1.0.1,C-0207,Prefer using secrets as files over secrets as environment variables +cis-v1.23-t1.0.1,C-0208,Consider external secret storage +cis-v1.23-t1.0.1,C-0209,Create administrative boundaries between resources using namespaces 
+cis-v1.23-t1.0.1,C-0210,Ensure that the seccomp profile is set to docker/default in your pod definitions +cis-v1.23-t1.0.1,C-0211,Apply Security Context to Your Pods and Containers +cis-v1.23-t1.0.1,C-0212,The default namespace should not be used +SOC2,C-0260,Missing network policy +SOC2,C-0012,Applications credentials in configuration files +SOC2,C-0186,Minimize access to secrets +SOC2,C-0035,Administrative Roles +SOC2,C-0067,Audit logs enabled +SOC2,C-0263,Ingress uses TLS +MITRE,C-0002,Prevent containers from allowing command execution +MITRE,C-0007,Roles with delete capabilities +MITRE,C-0012,Applications credentials in configuration files +MITRE,C-0014,Access Kubernetes dashboard +MITRE,C-0015,List Kubernetes secrets +MITRE,C-0020,Mount service principal +MITRE,C-0021,Exposed sensitive interfaces +MITRE,C-0026,Kubernetes CronJob +MITRE,C-0031,Delete Kubernetes events +MITRE,C-0035,Administrative Roles +MITRE,C-0036,Validate admission controller (validating) +MITRE,C-0037,CoreDNS poisoning +MITRE,C-0039,Validate admission controller (mutating) +MITRE,C-0042,SSH server running inside container +MITRE,C-0045,Writable hostPath mount +MITRE,C-0048,HostPath mount +MITRE,C-0052,Instance Metadata API +MITRE,C-0053,Access container service account +MITRE,C-0054,Cluster internal networking +MITRE,C-0057,Privileged container +MITRE,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. +MITRE,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability +MITRE,C-0066,Secret/etcd encryption enabled +MITRE,C-0067,Audit logs enabled +MITRE,C-0068,PSP enabled +MITRE,C-0069,Disable anonymous access to Kubelet service +MITRE,C-0070,Enforce Kubelet client TLS authentication +NSA,C-0002,Prevent containers from allowing command execution +NSA,C-0005,API server insecure port is enabled +NSA,C-0012,Applications credentials in configuration files +NSA,C-0013,Non-root containers +NSA,C-0016,Allow privilege escalation +NSA,C-0017,Immutable container filesystem +NSA,C-0030,Ingress and Egress blocked +NSA,C-0034,Automatic mapping of service account +NSA,C-0035,Administrative Roles +NSA,C-0038,Host PID/IPC privileges +NSA,C-0041,HostNetwork access +NSA,C-0044,Container hostPort +NSA,C-0046,Insecure capabilities +NSA,C-0054,Cluster internal networking +NSA,C-0055,Linux hardening +NSA,C-0057,Privileged container +NSA,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. 
+NSA,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability
+NSA,C-0066,Secret/etcd encryption enabled
+NSA,C-0067,Audit logs enabled
+NSA,C-0068,PSP enabled
+NSA,C-0069,Disable anonymous access to Kubelet service
+NSA,C-0070,Enforce Kubelet client TLS authentication
+NSA,C-0270,Ensure CPU limits are set
+NSA,C-0271,Ensure memory limits are set
+cis-eks-t1.2.0,C-0066,Secret/etcd encryption enabled
+cis-eks-t1.2.0,C-0067,Audit logs enabled
+cis-eks-t1.2.0,C-0078,Images from allowed registry
+cis-eks-t1.2.0,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root
+cis-eks-t1.2.0,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root
+cis-eks-t1.2.0,C-0172,Ensure that the --anonymous-auth argument is set to false
+cis-eks-t1.2.0,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow
+cis-eks-t1.2.0,C-0174,Ensure that the --client-ca-file argument is set as appropriate
+cis-eks-t1.2.0,C-0175,Verify that the --read-only-port argument is set to 0
+cis-eks-t1.2.0,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0
+cis-eks-t1.2.0,C-0177,Ensure that the --protect-kernel-defaults argument is set to true
+cis-eks-t1.2.0,C-0178,Ensure that the --make-iptables-util-chains argument is set to true
+cis-eks-t1.2.0,C-0179,Ensure that the --hostname-override argument is not set
+cis-eks-t1.2.0,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture
+cis-eks-t1.2.0,C-0181,Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate
+cis-eks-t1.2.0,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true
+cis-eks-t1.2.0,C-0185,Ensure that the cluster-admin role is only used where required
+cis-eks-t1.2.0,C-0186,Minimize access to secrets
+cis-eks-t1.2.0,C-0187,Minimize wildcard use in Roles and ClusterRoles
+cis-eks-t1.2.0,C-0188,Minimize access to create pods
+cis-eks-t1.2.0,C-0189,Ensure that default service accounts are not actively used
+cis-eks-t1.2.0,C-0190,Ensure that Service Account Tokens are only mounted where necessary
+cis-eks-t1.2.0,C-0191,"Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster"
+cis-eks-t1.2.0,C-0205,Ensure that the CNI in use supports Network Policies
+cis-eks-t1.2.0,C-0206,Ensure that all Namespaces have Network Policies defined
+cis-eks-t1.2.0,C-0207,Prefer using secrets as files over secrets as environment variables
+cis-eks-t1.2.0,C-0209,Create administrative boundaries between resources using namespaces
+cis-eks-t1.2.0,C-0211,Apply Security Context to Your Pods and Containers
+cis-eks-t1.2.0,C-0212,The default namespace should not be used
+cis-eks-t1.2.0,C-0213,Minimize the admission of privileged containers
+cis-eks-t1.2.0,C-0214,Minimize the admission of containers wishing to share the host process ID namespace
+cis-eks-t1.2.0,C-0215,Minimize the admission of containers wishing to share the host IPC namespace
+cis-eks-t1.2.0,C-0216,Minimize the admission of containers wishing to share the host network namespace
+cis-eks-t1.2.0,C-0217,Minimize the admission of containers with allowPrivilegeEscalation
+cis-eks-t1.2.0,C-0218,Minimize the admission of root containers
+cis-eks-t1.2.0,C-0219,Minimize the admission of containers with added capabilities
+cis-eks-t1.2.0,C-0220,Minimize the admission of containers with capabilities assigned
+cis-eks-t1.2.0,C-0221,Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider
+cis-eks-t1.2.0,C-0222,Minimize user access to Amazon ECR
+cis-eks-t1.2.0,C-0223,Minimize cluster access to read-only for Amazon ECR
+cis-eks-t1.2.0,C-0225,Prefer using dedicated EKS Service Accounts
+cis-eks-t1.2.0,C-0226,Prefer using a container-optimized OS when possible
+cis-eks-t1.2.0,C-0227,Restrict Access to the Control Plane Endpoint
+cis-eks-t1.2.0,C-0228,Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled
+cis-eks-t1.2.0,C-0229,Ensure clusters are created with Private Nodes
+cis-eks-t1.2.0,C-0230,Ensure Network Policy is Enabled and set as appropriate
+cis-eks-t1.2.0,C-0231,Encrypt traffic to HTTPS load balancers with TLS certificates
+cis-eks-t1.2.0,C-0232,Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156
+cis-eks-t1.2.0,C-0233,Consider Fargate for running untrusted workloads
+cis-eks-t1.2.0,C-0234,Consider external secret storage
+cis-eks-t1.2.0,C-0235,Ensure that the kubelet configuration file has permissions set to 644 or more restrictive
+cis-eks-t1.2.0,C-0238,Ensure that the kubeconfig file permissions are set to 644 or more restrictive
+cis-eks-t1.2.0,C-0242,Hostile multi-tenant workloads
+cis-eks-t1.2.0,C-0246,Avoid use of system:masters group
+cis-aks-t1.2.0,C-0078,Images from allowed registry
+cis-aks-t1.2.0,C-0088,RBAC enabled
+cis-aks-t1.2.0,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root
+cis-aks-t1.2.0,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root
+cis-aks-t1.2.0,C-0172,Ensure that the --anonymous-auth argument is set to false
+cis-aks-t1.2.0,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow
+cis-aks-t1.2.0,C-0174,Ensure that the --client-ca-file argument is set as appropriate
+cis-aks-t1.2.0,C-0175,Verify that the --read-only-port argument is set to 0
+cis-aks-t1.2.0,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0
+cis-aks-t1.2.0,C-0177,Ensure that the --protect-kernel-defaults argument is set to true
+cis-aks-t1.2.0,C-0178,Ensure that the --make-iptables-util-chains argument is set to true
+cis-aks-t1.2.0,C-0179,Ensure that the --hostname-override argument is not set
+cis-aks-t1.2.0,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture
+cis-aks-t1.2.0,C-0182,Ensure that the --rotate-certificates argument is not set to false
+cis-aks-t1.2.0,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true
+cis-aks-t1.2.0,C-0185,Ensure that the cluster-admin role is only used where required
+cis-aks-t1.2.0,C-0186,Minimize access to secrets
+cis-aks-t1.2.0,C-0187,Minimize wildcard use in Roles and ClusterRoles
+cis-aks-t1.2.0,C-0188,Minimize access to create pods
+cis-aks-t1.2.0,C-0189,Ensure that default service accounts are not actively used
+cis-aks-t1.2.0,C-0190,Ensure that Service Account Tokens are only mounted where necessary
+cis-aks-t1.2.0,C-0201,Minimize the admission of containers with capabilities assigned
+cis-aks-t1.2.0,C-0205,Ensure that the CNI in use supports Network Policies
+cis-aks-t1.2.0,C-0206,Ensure that all Namespaces have Network Policies defined
+cis-aks-t1.2.0,C-0207,Prefer using secrets as files over secrets as environment variables
+cis-aks-t1.2.0,C-0208,Consider external secret storage
+cis-aks-t1.2.0,C-0209,Create administrative boundaries between resources using namespaces
+cis-aks-t1.2.0,C-0211,Apply Security Context to Your Pods and Containers
+cis-aks-t1.2.0,C-0212,The default namespace should not be used
+cis-aks-t1.2.0,C-0213,Minimize the admission of privileged containers
+cis-aks-t1.2.0,C-0214,Minimize the admission of containers wishing to share the host process ID namespace
+cis-aks-t1.2.0,C-0215,Minimize the admission of containers wishing to share the host IPC namespace
+cis-aks-t1.2.0,C-0216,Minimize the admission of containers wishing to share the host network namespace
+cis-aks-t1.2.0,C-0217,Minimize the admission of containers with allowPrivilegeEscalation
+cis-aks-t1.2.0,C-0218,Minimize the admission of root containers
+cis-aks-t1.2.0,C-0219,Minimize the admission of containers with added capabilities
+cis-aks-t1.2.0,C-0235,Ensure that the kubelet configuration file has permissions set to 644 or more restrictive
+cis-aks-t1.2.0,C-0238,Ensure that the kubeconfig file permissions are set to 644 or more restrictive
+cis-aks-t1.2.0,C-0239,Prefer using dedicated AKS Service Accounts
+cis-aks-t1.2.0,C-0240,Ensure Network Policy is Enabled and set as appropriate
+cis-aks-t1.2.0,C-0241,Use Azure RBAC for Kubernetes Authorization.
+cis-aks-t1.2.0,C-0242,Hostile multi-tenant workloads
+cis-aks-t1.2.0,C-0243,Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider
+cis-aks-t1.2.0,C-0244,Ensure Kubernetes Secrets are encrypted
+cis-aks-t1.2.0,C-0245,Encrypt traffic to HTTPS load balancers with TLS certificates
+cis-aks-t1.2.0,C-0247,Restrict Access to the Control Plane Endpoint
+cis-aks-t1.2.0,C-0248,Ensure clusters are created with Private Nodes
+cis-aks-t1.2.0,C-0249,Restrict untrusted workloads
+cis-aks-t1.2.0,C-0250,Minimize cluster access to read-only for Azure Container Registry (ACR)
+cis-aks-t1.2.0,C-0251,Minimize user access to Azure Container Registry (ACR)
+cis-aks-t1.2.0,C-0252,Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled
+cis-aks-t1.2.0,C-0254,Enable audit Logs
+ArmoBest,C-0002,Prevent containers from allowing command execution
+ArmoBest,C-0005,API server insecure port is enabled
+ArmoBest,C-0012,Applications credentials in configuration files
+ArmoBest,C-0013,Non-root containers
+ArmoBest,C-0016,Allow privilege escalation
+ArmoBest,C-0017,Immutable container filesystem
+ArmoBest,C-0030,Ingress and Egress blocked
+ArmoBest,C-0034,Automatic mapping of service account
+ArmoBest,C-0035,Administrative Roles
+ArmoBest,C-0038,Host PID/IPC privileges
+ArmoBest,C-0041,HostNetwork access
+ArmoBest,C-0044,Container hostPort
+ArmoBest,C-0046,Insecure capabilities
+ArmoBest,C-0049,Network mapping
+ArmoBest,C-0054,Cluster internal networking
+ArmoBest,C-0055,Linux hardening
+ArmoBest,C-0057,Privileged container
+ArmoBest,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access.
+ArmoBest,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability
+ArmoBest,C-0061,Pods in default namespace
+ArmoBest,C-0062,Sudo in container entrypoint
+ArmoBest,C-0063,Portforwarding privileges
+ArmoBest,C-0065,No impersonation
+ArmoBest,C-0066,Secret/etcd encryption enabled
+ArmoBest,C-0067,Audit logs enabled
+ArmoBest,C-0068,PSP enabled
+ArmoBest,C-0069,Disable anonymous access to Kubelet service
+ArmoBest,C-0070,Enforce Kubelet client TLS authentication
+ArmoBest,C-0078,Images from allowed registry
+ArmoBest,C-0079,CVE-2022-0185-linux-kernel-container-escape
+ArmoBest,C-0081,CVE-2022-24348-argocddirtraversal
+ArmoBest,C-0087,CVE-2022-23648-containerd-fs-escape
+ArmoBest,C-0089,CVE-2022-3172-aggregated-API-server-redirect
+ArmoBest,C-0091,CVE-2022-47633-kyverno-signature-bypass
+ArmoBest,C-0236,Verify image signature
+ArmoBest,C-0237,Check if signature exists
+ArmoBest,C-0270,Ensure CPU limits are set
+ArmoBest,C-0271,Ensure memory limits are set
+WorkloadScan,C-0078,Images from allowed registry
+WorkloadScan,C-0236,Verify image signature
+WorkloadScan,C-0237,Check if signature exists
+WorkloadScan,C-0045,Writable hostPath mount
+WorkloadScan,C-0048,HostPath mount
+WorkloadScan,C-0257,Workload with PVC access
+WorkloadScan,C-0207,Prefer using secrets as files over secrets as environment variables
+WorkloadScan,C-0034,Automatic mapping of service account
+WorkloadScan,C-0012,Applications credentials in configuration files
+WorkloadScan,C-0041,HostNetwork access
+WorkloadScan,C-0260,Missing network policy
+WorkloadScan,C-0044,Container hostPort
+WorkloadScan,C-0038,Host PID/IPC privileges
+WorkloadScan,C-0046,Insecure capabilities
+WorkloadScan,C-0013,Non-root containers
+WorkloadScan,C-0016,Allow privilege escalation
+WorkloadScan,C-0017,Immutable container filesystem
+WorkloadScan,C-0055,Linux hardening
+WorkloadScan,C-0057,Privileged container
+WorkloadScan,C-0270,Ensure CPU limits are set
+WorkloadScan,C-0271,Ensure memory limits are set
+security,C-0005,API server insecure port is enabled
+security,C-0012,Applications credentials in configuration files
+security,C-0013,Non-root containers
+security,C-0016,Allow privilege escalation
+security,C-0017,Immutable container filesystem
+security,C-0034,Automatic mapping of service account
+security,C-0035,Administrative Roles
+security,C-0038,Host PID/IPC privileges
+security,C-0041,HostNetwork access
+security,C-0044,Container hostPort
+security,C-0045,Writable hostPath mount
+security,C-0046,Insecure capabilities
+security,C-0048,HostPath mount
+security,C-0057,Privileged container
+security,C-0066,Secret/etcd encryption enabled
+security,C-0069,Disable anonymous access to Kubelet service
+security,C-0070,Enforce Kubelet client TLS authentication
+security,C-0074,Container runtime socket mounted
+security,C-0211,Apply Security Context to Your Pods and Containers
+security,C-0255,Workload with secret access
+security,C-0256,Exposure to internet
+security,C-0257,Workload with PVC access
+security,C-0258,Workload with ConfigMap access
+security,C-0259,Workload with credential access
+security,C-0260,Missing network policy
+security,C-0261,ServiceAccount token mounted
+security,C-0262,Anonymous user has RoleBinding
+security,C-0265,system:authenticated user has elevated roles
+security,C-0267,Workload with cluster takeover roles
+security,C-0270,Ensure CPU limits are set
+security,C-0271,Ensure memory limits are set
+security,C-0272,Workload with administrative roles
+security,C-0273,Outdated Kubernetes version
+ClusterScan,C-0066,Secret/etcd encryption enabled
+ClusterScan,C-0088,RBAC enabled
+ClusterScan,C-0067,Audit logs enabled
+ClusterScan,C-0005,API server insecure port is enabled
+ClusterScan,C-0262,Anonymous user has RoleBinding
+ClusterScan,C-0265,system:authenticated user has elevated roles
+ClusterScan,C-0015,List Kubernetes secrets
+ClusterScan,C-0002,Prevent containers from allowing command execution
+ClusterScan,C-0007,Roles with delete capabilities
+ClusterScan,C-0063,Portforwarding privileges
+ClusterScan,C-0036,Validate admission controller (validating)
+ClusterScan,C-0039,Validate admission controller (mutating)
+ClusterScan,C-0035,Administrative Roles
+ClusterScan,C-0188,Minimize access to create pods
+ClusterScan,C-0187,Minimize wildcard use in Roles and ClusterRoles
+ClusterScan,C-0012,Applications credentials in configuration files
+ClusterScan,C-0260,Missing network policy
+ClusterScan,C-0256,Exposure to internet
+ClusterScan,C-0038,Host PID/IPC privileges
+ClusterScan,C-0041,HostNetwork access
+ClusterScan,C-0048,HostPath mount
+ClusterScan,C-0057,Privileged container
+ClusterScan,C-0013,Non-root containers
diff --git a/releaseDev/allcontrols.json b/releaseDev/allcontrols.json
new file mode 100644
index 000000000..fb7059b5f
--- /dev/null
+++ b/releaseDev/allcontrols.json
@@ -0,0 +1,4656 @@
+{
+    "name": "AllControls",
+    "description": "Contains all the controls from all the frameworks",
+    "attributes": {
+        "armoBuiltin": true
+    },
+    "scanningScope": {
+        "matches": [
+            "cluster",
+            "file"
+        ]
+    },
+    "typeTags": [
+        "compliance"
+    ],
+    "version": null,
+    "controls": [
+        {
+            "name": "Prevent containers from allowing command execution",
+            "attributes": {
+                "microsoftMitreColumns": [
+                    "Execution"
+                ],
+                "rbacQuery": "Show who can access into pods",
+                "controlTypeTags": [
+                    "compliance",
+                    "security-impact"
+                ]
+            },
+            "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.",
+            "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.",
+            "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d).
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to 
gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "insecure-port-flag", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Roles with delete capabilities", + "attributes": { + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. 
This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-excessive-delete-rights-v1", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + 
"description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + } + ] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-access-dashboard-subject-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" + }, + { + "name": "rule-access-dashboard-wl-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, 
\"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. 
", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. 
", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": 
fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + } + ] + }, + { + "name": "Configured readiness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "configured-readiness-probe", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Readiness probe is not configured", + "remediation": "Ensure Readiness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = 
wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Mount service principal", + "attributes": { + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-mount-potential-credentials-paths", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "relevantCloudProviders": [ + "EKS", + "GKE", + "AKS" + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tstart_of_path := volumes_data[\"start_of_path\"]\n result := is_unsafe_paths(volume, start_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"start_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"start_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, start_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [start_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" + } + ] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. 
Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "exposed-sensitive-interfaces-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.sensitiveInterfaces" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveInterfaces", + "name": "Sensitive interfaces", + "description": "List of known software interfaces that should not generally be exposed to the Internet." + } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" + } + ] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes 
CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rule-deny-cronjobs", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob" + }, + "ruleLanguage": "rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if it's cronjob", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# alert cronjobs\n\n# handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ingress-and-egress-blocked", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, 
networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" + } + ] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-delete-k8s-events-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. 
Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- 
\nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-validating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Validate admission controller" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns validating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-mutating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Validate admission controller" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns mutating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in the AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to the host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "SSH server running inside container", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "long_description": "SSH server that is running inside a container may be used by attackers. 
If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", + "controlID": "C-0042", + "baseScore": 3.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-ssh-to-pod-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == 
wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := 
is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := 
container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n}" + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." 
+ } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = 
sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" + } + ] + }, + { + "name": "Network mapping", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Instance Metadata API", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
This control checks if there is access from the nodes to cloud providers' instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container may query the metadata API service to get information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "instance-metadata-api-access", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "cloudProviderInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Checks if there is access from the nodes to cloud providers' instance metadata services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" + } + ] + }, + { + "name": "Access container service account", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with the Kubernetes API server. All pods with an SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have an SA token mounted into them.", + "long_description": "A service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. 
If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "access-container-service-account-v1", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" + } + ] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is 
defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. 
", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "linux-hardening", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, 
field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + } + ] + }, + { + "name": "Configured liveness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "controlID": "C-0056", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "configured-liveness-probe", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Liveness probe is not configured", + "remediation": "Ensure Liveness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if 
container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example) can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments are defined as privileged: true", + "remediation": "avoid defining pods as privileged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 
10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. 
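A minimal sketch of the subPath usage described above, which this rule flags when the node's kubelet is one of the listed vulnerable versions; pod, container and volume names are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo         # illustrative name
spec:
  containers:
  - name: app
    image: busybox
    volumeMounts:
    - name: data
      mountPath: /data
      subPath: logs          # subPath (or subPathExpr) is what the rule flags
  volumes:
  - name: data
    emptyDir: {}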
", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + } + ] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. 
Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := 
{\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + } + ] + }, + { + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the pods running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
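A minimal sketch of the remediation for this control: drop sudo from the entrypoint and grant only the specific capability the process actually needs through securityContext (NET_BIND_SERVICE is an assumed requirement here; image and names are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: web                  # illustrative name
spec:
  containers:
  - name: server
    image: nginx
    command: ["nginx", "-g", "daemon off;"]    # no sudo prefix in the entrypoint
    securityContext:
      capabilities:
        add: ["NET_BIND_SERVICE"]              # grant only what is actually needed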
This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "sudo-in-container-entrypoint", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, start_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. 
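A minimal sketch of the kind of RBAC object this control reports: a Role granting create on pods/portforward, which is what kubectl port-forward needs (the role name and namespace are illustrative):

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: debug-port-forward          # illustrative name
  namespace: default
rules:
- apiGroups: [""]
  resources: ["pods/portforward"]   # the resource the control looks for
  verbs: ["create"]                 # the verb used by kubectl port-forward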
Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-portforward-v1", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "No impersonation", + "attributes": { + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, 
k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == 
\"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Audit logs enabled", + 
"attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot 
all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
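For the k8s-audit-logs-enabled-native rule above (control C-0067), the missing --audit-policy-file flag points at an audit policy object; a minimal sketch of such a policy, assuming Metadata-level logging is sufficient:

apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata     # record request metadata for every request

The kube-apiserver would then be started with --audit-policy-file pointing at this file, typically together with --audit-log-path for the log destination.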
It is an important to use PSP to control the creation of sensitive pods in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "psp-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading 
the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Naked pods", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have corresponding parental object.", + "remediation": "Create necessary Deployment object for every pod making any pod a first class citizen in your IaC architecture.", + "long_description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have corresponding parental object.", + "test": "Test if pods are not associated with Deployment, ReplicaSet etc. If not, fail.", + "controlID": "C-0073", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "naked-pods", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", + "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. 
Example command: kubectl create deployment nginx-depl --image=nginx:1.19", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "containers-mounting-docker-socket", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Check hostpath. 
If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n" + } + ] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all pods with latest tag that have ImagePullSecret not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. 
If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "image-pull-policy-is-not-set-to-always", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "check imagePullPolicy field, if imagePullPolicy = always pass, else fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n    pod := input[_]\n    pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n    is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n    wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n    is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n    wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" + } + ] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "Test will check if a certain set of labels is defined, this is a configurable control. 
Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "label-usage-for-resources", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.recommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.recommendedLabels", + "name": "Recommended Labels", + "description": "Kubescape checks that workloads have at least one label that identifies semantic attributes." + } + ], + "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for 
WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" + } + ] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
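A minimal sketch using the Kubernetes recommended label set (values are placeholders):

    metadata:
      labels:
        app.kubernetes.io/name: my-app        # placeholder
        app.kubernetes.io/version: "1.0.0"
        app.kubernetes.io/part-of: my-system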
This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "k8s-common-labels-usage", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.k8sRecommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.k8sRecommendedLabels", + "name": "Kubernetes Recommended Labels", + "description": "Kubescape checks that workloads have at least one of this list of configurable labels, as recommended in the Kubernetes documentation." + } + ], + "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his 
Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" + } + ] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. 
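A hedged sketch of the control configuration (registry prefixes are illustrative; the exact wrapper file or ConfigMap layout may differ, but the rule reads data.postureControlInputs.imageRepositoryAllowList as a list of registry prefixes that images must start with):

    postureControlInputs:
      imageRepositoryAllowList:
      - "docker.io/mycompany/"     # illustrative prefixes
      - "quay.io/mycompany/"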
User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." + } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + }, + { + "name": "container-image-repository-v1", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" + } + ] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-0185", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "LinuxKernelVariables" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n\n parsed_kernel_version_arr := parse_kernel_version_to_array(node.status.nodeInfo.kernelVersion)\n is_azure := parsed_kernel_version_arr[4] == \"azure\"\n\n is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure)\n\n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n \"reviewPaths\": [\"kernelVersion\"],\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\n# General Kernel versions are between 5.1.1 and 5.16.2\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == false\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 16\n parsed_kernel_version_arr[2] < 2\n}\n\n# Azure kernel version with is 5.4.0-1067-azure\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == true\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 4\n parsed_kernel_version_arr[2] == 0\n parsed_kernel_version_arr[3] < 1067\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}\n\nparse_kernel_version_to_array(kernel_version_str) = output {\n\tversion_triplet := regex.find_n(`(\\d+\\.\\d+\\.\\d+)`, kernel_version_str,-1)\n version_triplet_array := split(version_triplet[0],\".\")\n\n build_vendor := regex.find_n(`-(\\d+)-(\\w+)`, kernel_version_str,-1)\n build_vendor_array := split(build_vendor[0],\"-\")\n\n output := [to_number(version_triplet_array[0]),to_number(version_triplet_array[1]),to_number(version_triplet_array[2]),to_number(build_vendor_array[1]),build_vendor_array[2]]\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] 
{\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-24348", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) 
{\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-23648", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 
5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" + } + ] + }, + { + "name": "RBAC enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rbac-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" + }, + { + "name": "rbac-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CVE-2022-39328-grafana-auth-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-39328 is a critical 
vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", + "remediation": "Update your Grafana to 9.2.4 or above", + "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. The CVSS score for this vulnerability is 9.8 Critical.", + "test": "This control test for vulnerable versions of Grafana (between 9.2 and 9.2.3)", + "controlID": "C-0090", + "baseScore": 9.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-39328", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Grafana to 9.2.4 or above", + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image 
repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process was pull image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", + "test": "This control test for vulnerable versions of Grafana (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-47633", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. 
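For illustration only (a hypothetical binding), this is the pattern the rule fails on; the remediation is to delete the offending subject or the whole binding:

    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: anonymous-read               # hypothetical
    subjects:
    - kind: Group
      name: system:unauthenticated       # flagged, as is system:anonymous
      apiGroup: rbac.authorization.k8s.io
    roleRef:
      kind: ClusterRole
      name: view
      apiGroup: rbac.authorization.k8s.io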
Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "anonymous-access-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n" + } + ] + }, + { + "controlID": "C-0265", + "name": "Authenticated user has sensitive permissions", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
This control ensures that system:authenticated users do not have cluster risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "system-authenticated-allowed-to-take-over-cluster", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is gourp\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}" + } + ] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for 
which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-cpu-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + 
"security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" + } + ] + } + ], + 
"ControlsIDs": [ + "C-0002", + "C-0005", + "C-0007", + "C-0012", + "C-0013", + "C-0014", + "C-0015", + "C-0016", + "C-0017", + "C-0018", + "C-0020", + "C-0021", + "C-0026", + "C-0030", + "C-0031", + "C-0034", + "C-0035", + "C-0036", + "C-0038", + "C-0039", + "C-0041", + "C-0042", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0049", + "C-0052", + "C-0053", + "C-0054", + "C-0055", + "C-0056", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0078", + "C-0079", + "C-0081", + "C-0087", + "C-0088", + "C-0090", + "C-0091", + "C-0262", + "C-0265", + "C-0270", + "C-0271" + ] +} \ No newline at end of file diff --git a/releaseDev/armobest.json b/releaseDev/armobest.json new file mode 100644 index 000000000..79de5e4f4 --- /dev/null +++ b/releaseDev/armobest.json @@ -0,0 +1,3066 @@ +{ + "name": "ArmoBest", + "description": "", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to 
gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "insecure-port-flag", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. 
This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + } + ] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = 
[]\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", 
[container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + } + ] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ingress-and-egress-blocked", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, 
networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" + } + ] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. 
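A minimal sketch of the remediation described for this control, using placeholder names and image (not taken from the control's example file):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-sa                        # placeholder name
automountServiceAccountToken: false       # disable token mounting at the ServiceAccount level
---
apiVersion: v1
kind: Pod
metadata:
  name: example-pod                       # placeholder name
spec:
  serviceAccountName: example-sa
  automountServiceAccountToken: false     # pod-level setting; takes precedence over the ServiceAccount
  containers:
  - name: app
    image: registry.example/app:latest    # placeholder image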
Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, 
metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := 
array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. 
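A minimal sketch of a compliant pod spec for this control, with placeholder names; omitting hostPID and hostIPC entirely is equivalent, since both default to false:

apiVersion: v1
kind: Pod
metadata:
  name: example-pod                       # placeholder name
spec:
  hostPID: false                          # shown explicitly; leaving the field out has the same effect
  hostIPC: false
  containers:
  - name: app
    image: registry.example/app:latest    # placeholder image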
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": 
[path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := 
wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. 
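A minimal sketch of the drop-then-add pattern for container capabilities, assuming a placeholder workload; the capability added back is only an illustration and should be replaced by whatever the container actually needs, checked against the configured insecureCapabilities list:

apiVersion: v1
kind: Pod
metadata:
  name: example-pod                       # placeholder name
spec:
  containers:
  - name: app
    image: registry.example/app:latest    # placeholder image
    securityContext:
      capabilities:
        drop: ["ALL"]                     # remove every capability first
        add: ["NET_BIND_SERVICE"]         # illustrative, narrowly scoped capability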
You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." + } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "Network mapping", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. 
Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
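A minimal default-deny NetworkPolicy of the kind these network controls (and the ingress-and-egress-blocked rule above) look for, with placeholder name and namespace; it selects every pod in the namespace and declares both policy types:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all                  # placeholder name
  namespace: example-ns                   # placeholder namespace
spec:
  podSelector: {}                         # empty selector matches all pods in the namespace
  policyTypes:
  - Ingress
  - Egress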
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
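A minimal sketch of one way to satisfy the linux-hardening rule, assuming placeholder names; defining any one of seccomp, SELinux, AppArmor or dropped capabilities on the pod or container is sufficient:

apiVersion: v1
kind: Pod
metadata:
  name: example-pod                       # placeholder name
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault                # use the container runtime's default seccomp filter
  containers:
  - name: app
    image: registry.example/app:latest    # placeholder image
    securityContext:
      capabilities:
        drop: ["ALL"]                     # dropped capabilities also count as hardening for this rule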
If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "linux-hardening", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = 
containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": 
path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
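For illustration only (placeholder names), a volumeMount of the kind this control flags when the node's kubelet version is in the affected range; removing the subPath field, or upgrading the cluster, addresses the finding:

apiVersion: v1
kind: Pod
metadata:
  name: example-pod                       # placeholder name
spec:
  containers:
  - name: app
    image: registry.example/app:latest    # placeholder image
    volumeMounts:
    - name: data
      mountPath: /data
      subPath: reports                    # this field is what the rule checks for
  volumes:
  - name: data
    emptyDir: {}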
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + } + ] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + 
"controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + } + ] + }, + { + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the pods running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "sudo-in-container-entrypoint", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, start_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. 
Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit the \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have the relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods \u2013 i.e., whether they have access to the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-portforward-v1", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "No impersonation", + "attributes": { + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, 
k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == 
\"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Audit logs enabled", + 
"attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot 
all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive pods in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "psp-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading 
the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + }, + { + "name": "container-image-repository-v1", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", 
+ "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." + } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" + } + ] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-0185", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "LinuxKernelVariables" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n\n parsed_kernel_version_arr := parse_kernel_version_to_array(node.status.nodeInfo.kernelVersion)\n is_azure := parsed_kernel_version_arr[4] == \"azure\"\n\n is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure)\n\n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n \"reviewPaths\": [\"kernelVersion\"],\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\n# General Kernel versions are between 5.1.1 and 5.16.2\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == false\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 16\n parsed_kernel_version_arr[2] < 2\n}\n\n# Azure kernel version with is 5.4.0-1067-azure\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == true\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 4\n parsed_kernel_version_arr[2] == 0\n parsed_kernel_version_arr[3] < 1067\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}\n\nparse_kernel_version_to_array(kernel_version_str) = output {\n\tversion_triplet := regex.find_n(`(\\d+\\.\\d+\\.\\d+)`, kernel_version_str,-1)\n version_triplet_array := split(version_triplet[0],\".\")\n\n build_vendor := regex.find_n(`-(\\d+)-(\\w+)`, kernel_version_str,-1)\n build_vendor_array := split(build_vendor[0],\"-\")\n\n output := [to_number(version_triplet_array[0]),to_number(version_triplet_array[1]),to_number(version_triplet_array[2]),to_number(build_vendor_array[1]),build_vendor_array[2]]\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] 
{\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to a fixed version (v2.1.9, v2.2.4 or v2.3.0)", + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information such as credentials, secrets and API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movement and information disclosure.", + "test": "Checking the Argo CD image version in Deployment objects; if it is a version vulnerable to CVE-2022-24348, it fires an alert", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-24348", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) 
{\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-23648", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 
5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" + } + ] + }, + { + "name": "CVE-2022-3172-aggregated-API-server-redirect", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [] + }, + "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patches): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", + "controlID": "C-0089", + "baseScore": 3.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-3172", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apiregistration.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "APIService" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "apiserverinfo.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", + "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n" + } + ] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of 
policies using a malicious image repository or MITM proxy", + "remediation": "Update your Kyverno to version 1.8.5 or above", + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", + "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-47633", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "controlID": "C-0236", + "name": "Verify image signature", + "description": "Verifies the signature of each image with given public keys", + "long_description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "manual_test": "", + "references": [], + "attributes": { + "actionRequired": "configuration" + }, + 
"baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "verify-image-signature", + "attributes": { + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "ruleQuery": "armo_builtins", + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.trustedCosignPublicKeys", + "name": "Trusted Cosign public keys", + "description": "A list of trusted Cosign public keys that are used for validating container image signatures." + } + ], + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.containers[%v].image\", [i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0237", + "name": "Check 
if signature exists", + "description": "Ensures that all images contain some signature", + "long_description": "Verifies that each image is signed", + "remediation": "Replace the image with a signed image", + "manual_test": "", + "references": [], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "has-image-signature", + "attributes": { + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensures that all images contain some signature", + "remediation": "Replace the image with a signed image", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + } + ] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary 
notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-cpu-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } 
+ ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0005", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + 
"C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0049", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0078", + "C-0079", + "C-0081", + "C-0087", + "C-0089", + "C-0091", + "C-0236", + "C-0237", + "C-0270", + "C-0271" + ] +} \ No newline at end of file diff --git a/releaseDev/attack_tracks.json b/releaseDev/attack_tracks.json new file mode 100644 index 000000000..f2679f12e --- /dev/null +++ b/releaseDev/attack_tracks.json @@ -0,0 +1,109 @@ +[ + { + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "external-workload-with-cluster-takeover-roles" + }, + "spec": { + "version": null, + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Cluster Access", + "description": "An attacker has access to sensitive information and can leverage them by creating pods in the cluster." + } + ] + } + } + }, + { + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "external-database-without-authentication" + }, + "spec": { + "version": null, + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Unauthenticated Access", + "description": "An unauthenticated attacker can access resources." + } + ] + } + } + }, + { + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "service-destruction" + }, + "spec": { + "version": null, + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Denial of service", + "description": "An attacker can overload the workload, making it unavailable." + } + ] + } + } + }, + { + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "workload-external-track" + }, + "spec": { + "version": null, + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Execution (Vulnerable Image)", + "description": "An attacker can execute malicious code by exploiting vulnerable images.", + "checksVulnerabilities": true, + "subSteps": [ + { + "name": "Data Collection", + "description": "An attacker can gather data." + }, + { + "name": "Secret Access", + "description": "An attacker can steal secrets." + }, + { + "name": "Credential access", + "description": "An attacker can steal account names and passwords." + }, + { + "name": "Privilege Escalation (Node)", + "description": "An attacker can gain permissions and access node resources." + }, + { + "name": "Persistence", + "description": "An attacker can create a foothold." + }, + { + "name": "Lateral Movement (Network)", + "description": "An attacker can move through the network." 
+ } + ] + } + ] + } + } + } +] \ No newline at end of file diff --git a/releaseDev/cis-aks-t1.2.0.json b/releaseDev/cis-aks-t1.2.0.json new file mode 100644 index 000000000..8214ae81c --- /dev/null +++ b/releaseDev/cis-aks-t1.2.0.json @@ -0,0 +1,4282 @@ +{ + "name": "cis-aks-t1.2.0", + "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", + "attributes": { + "armoBuiltin": true, + "version": "v1.2.0" + }, + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "2": { + "name": "Master (Control Plane) Configuration", + "id": "2", + "subSections": { + "1": { + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0254" + ] + } + } + }, + "3": { + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183" + ] + } + } + }, + "4": { + "name": "Policies", + "id": "4", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190" + ] + }, + "2": { + "name": "Pod Security Standards", + "id": "4.2", + "controlsIDs": [ + "C-0201", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219" + ] + }, + "3": { + "name": "Azure Policy / OPA", + "id": "4.3", + "controlsIDs": [] + }, + "4": { + "name": "CNI Plugin", + "id": "4.4", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "5": { + "name": "Secrets Management", + "id": "4.5", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "6": { + "name": "Extensible Admission Control", + "id": "4.6", + "controlsIDs": [] + }, + "7": { + "name": "General Policies", + "id": "4.7", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + } + } + }, + "5": { + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0243", + "C-0250", + "C-0251" + ] + }, + "2": { + "name": "Access and identity options for Azure Kubernetes Service (AKS)", + "id": "5.2", + "controlsIDs": [ + "C-0239", + "C-0241" + ] + }, + "3": { + "name": "Key Management Service (KMS)", + "id": "5.3", + "controlsIDs": [ + "C-0244" + ] + }, + "4": { + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0240", + "C-0245", + "C-0247", + "C-0248", + "C-0252" + ] + }, + "5": { + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0088" + ] + }, + "6": { + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0242", + "C-0249" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Use approved container registries.", + "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", + 
"long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." + } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": 
\"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + }, + { + "name": "container-image-repository-v1", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" + } + ], + "references": [ + "\n\n \n\n " + ], + "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry." + }, + { + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. 
You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rbac-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" + }, + { + "name": "rbac-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" + } + ], + "references": [ + "\n\n " + ] + }, + { + "controlID": "C-0167", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kubelet` controls various parameters for 
the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0171", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. 
Verify that the ownership is set to `root:root`", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0172", + "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": { \"anonymous\": { \"enabled\": false }` argument is set to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"anonymous\":{\"enabled\":false}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": 
obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0173", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"authentication\": \"webhook\": \"enabled\"` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more 
/etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": {\"webhook\": { \"enabled\": is set to true`.\n\n If the `\"authentication\": {\"mode\": {` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `\"authentication\": {\"mode\": {` to something other than `AlwaysAllow`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"webhook\":{\"enabled\":true}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Do not allow all requests. 
Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0174", + "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to the location of the client CA file\n\n \n```\n\"authentication\": { \"x509\": { \"clientCAFile\": \"<path-to-client-ca-file>\" } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"x509\": {\"clientCAFile:\"` set to the location of the client certificate authority file.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"x509\": {\"clientCAFile:\"` argument exists and is set to the location of the client certificate authority file.\n\n If the `\"x509\": {\"clientCAFile:\"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `\"authentication\": { \"x509\": {\"clientCAFile:\"` to the location of the client certificate authority file.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication.. 
x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot 
obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0175", + "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `readOnlyPort` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "read-only-port-enabled-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0176", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + 
"long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": 
obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0177", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file 
sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-protect-kernel-defaults", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the --protect-kernel-defaults argument is set to true.", + "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, 
\"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0178", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. 
You might have iptables rules too restrictive or too open.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. 
If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-ip-tables", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0179", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames 
could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-hostname-override", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --hostname-override argument is not set.", + "remediation": "Unset the --hostname-override argument.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0180", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "See the AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-event-qps", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": 
{\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0182", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", + "references": [ + "\n\n \n\n \n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-certificates", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --rotate-certificates argument is not set to false.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"rotateCertificates\"],\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0183", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching 
for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-kubelet-server-certificate", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n" + } + ] + }, + { + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. 
Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "cluster-admin-role", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; 
apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + 
"useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespace in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-create-pod", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in 
api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-default-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + 
"match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + }, + { + "name": "namespace-without-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not 
have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not including default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service account tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := 
get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path 
is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "controlID": "C-0201", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-restricted-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0205", + "name": "CIS-4.4.1 Ensure latest CNI version is used", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "manual_test": "Ensure CNI plugin supports network policies.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None.", + "default_value": "This will depend on the CNI plugin in use.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" + } + ] + }, + { + "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "default_value": "By default, network policies are not created.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-secrets-in-env-var", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "CIS-4.5.2 Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 5, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "external-secret-storage", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" + } + ] + }, + { + "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Azure AKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "\n\n \n\n \n\n ." 
+ ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "default_value": "When you create an AKS cluster, the following namespaces are available:\n\n NAMESPACES\nNamespace Description\ndefault Where pods and deployments are created by default when none is provided. In smaller environments, you can deploy applications directly into the default namespace without creating additional logical separations. When you interact with the Kubernetes API, such as with kubectl get pods, the default namespace is used when none is specified.\nkube-system Where core resources exist, such as network features like DNS and proxy, or the Kubernetes dashboard. You typically don't deploy your own applications into this namespace.\nkube-public Typically not used, but can be used for resources to be visible across the whole cluster, and can be viewed by any user.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-namespaces", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. 
For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + 
"remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "immutable-container-filesystem", + "attributes": {}, + 
"ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + }, + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 
0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), 
\"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, 
\"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-seccomp-profile", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, 
path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-procmount-default", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if 
container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of 
\"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" + }, + { + "name": "set-fsgroup-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + 
"apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "set-sysctls-params", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + 
} + ], + "ruleDependencies": [], + "description": "Fails if securityContext.sysctls is not set.", + "remediation": "Set securityContext.sysctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext 
has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" + }, + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = 
sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "CIS-4.7.3 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get all -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "rolebinding-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + 
"rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "role-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "configmap-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "endpoints-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Endpoints" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "persistentvolumeclaim-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PersistentVolumeClaim" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "podtemplate-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + 
], + "resources": [ + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "replicationcontroller-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "service-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "serviceaccount-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "endpointslice-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "discovery.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "EndpointSlice" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "horizontalpodautoscaler-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + 
"v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "lease-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "csistoragecapacity-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ingress-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "poddisruptionbudget-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-secret-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + 
"apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "controlID": "C-0213", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n as an alternative AZ CLI can be used:\n\n \n```\naz aks list --output yaml\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an AKS cluster, the value of \"enablePodSecurityPolicy\" is null.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "psp-deny-privileged-container", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one 
PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0214", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostpid", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0215", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC 
namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostipc", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0216", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following 
command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostnetwork", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0217", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-allowprivilegeescalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0218", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-root-container", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return all the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" + } + ] + }, + { + "controlID": "C-0219", + "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-allowed-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0235", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0238", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0239", + "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. 
Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. Webhook token authentication is configured and managed as part of the AKS cluster.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-default-service-accounts-has-only-default-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"deletePaths\": [sprintf(\"subjects[%d]\", [i])],\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0240", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-cni-enabled-aks", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n" + } + ] + }, + { + "controlID": "C-0241", + "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. 
The scope can be an individual resource, a resource group, or across the subscription.", + "remediation": "Set Azure RBAC as access system.", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-azure-rbac-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", + "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled check if Azure RBAC is enabled into ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n" + } + ] + }, + { + "controlID": "C-0242", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-hostile-multitenant-workloads", + "attributes": { + "actionRequired": "manual review" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "ruleDependencies": [], + "configInputs": [], + "controlConfigInputs": [], + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. 
The security domain for Kubernetes becomes the entire cluster, not an individual node.", + "remediation": "Use physically isolated clusters", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" + } + ] + }, + { + "controlID": "C-0243", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. Or, a network issue might prevent you from connecting to the registry.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "remediation": "Enable Azure Defender image scanning. 
Command: az aks update --enable-defender --resource-group --name ", + "ruleQuery": "armo_builtin", + "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n" + } + ] + }, + { + "controlID": "C-0244", + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n 
cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + } + ] + }, + { + "controlID": "C-0245", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := 
input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"spec.tls\"],\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n" + } + ] + }, + { + 
"controlID": "C-0247", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. 
Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", + "default_value": "By default, Endpoint Private Access is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "restrict-access-to-the-control-plane-endpoint", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n" + } + ] + }, + { + "controlID": "C-0248", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-clusters-are-created-with-private-nodes", + "attributes": { + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. 
Private Nodes are nodes with no public IP addresses.", + "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n" + } + ] + }, + { + "controlID": "C-0249", + "name": "CIS-5.6.1 Restrict untrusted workloads", + "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", + "long_description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment. 
Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "actionRequired": "manual review" + }, + "baseScore": 5, + "impact_statement": "", + "default_value": "ACI is not a default component of the AKS", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-manual", + "attributes": { + "actionRequired": "manual review", + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" + } + ] + }, + { + "controlID": "C-0250", + "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-service-principle-has-read-only-permissions", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not 
read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0251", + "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", + "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-role-definitions-in-acr", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0252", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publicly with unrestricted access. 
Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "ruleQuery": "armo_builtins", + "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n" + } + ] + }, + { + "controlID": "C-0254", + "name": "CIS-2.1.1 Enable audit Logs", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. 
Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", + "default_value": "By default, cluster control plane logs aren't sent to be Logged.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-manual", + "attributes": { + "actionRequired": "manual review", + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" + } + ] + } + ], + "ControlsIDs": [ + "C-0078", + "C-0088", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + 
"C-0188", + "C-0189", + "C-0190", + "C-0201", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0235", + "C-0238", + "C-0239", + "C-0240", + "C-0241", + "C-0242", + "C-0243", + "C-0244", + "C-0245", + "C-0247", + "C-0248", + "C-0249", + "C-0250", + "C-0251", + "C-0252", + "C-0254" + ] +} \ No newline at end of file diff --git a/releaseDev/cis-eks-t1.2.0.json b/releaseDev/cis-eks-t1.2.0.json new file mode 100644 index 000000000..0e00ccc52 --- /dev/null +++ b/releaseDev/cis-eks-t1.2.0.json @@ -0,0 +1,4456 @@ +{ + "name": "cis-eks-t1.2.0", + "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", + "attributes": { + "armoBuiltin": true, + "version": "v1.2.0" + }, + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "2": { + "name": "Control Plane Configuration", + "id": "2", + "subSections": { + "1": { + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0067" + ] + } + } + }, + "3": { + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0183" + ] + }, + "3": { + "name": "Container Optimized OS", + "id": "3.3", + "controlsIDs": [ + "C-0226" + ] + } + } + }, + "4": { + "name": "Policies", + "id": "4", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0246" + ] + }, + "2": { + "name": "Pod Security Policies", + "id": "4.2", + "controlsIDs": [ + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0220" + ] + }, + "3": { + "name": "CNI Plugin", + "id": "4.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "name": "Secrets Management", + "id": "4.4", + "controlsIDs": [ + "C-0207", + "C-0234" + ] + }, + "6": { + "name": "General Policies", + "id": "4.6", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + } + } + }, + "5": { + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0221", + "C-0222", + "C-0223" + ] + }, + "2": { + "name": "Identity and Access Management (IAM)", + "id": "5.2", + "controlsIDs": [ + "C-0225" + ] + }, + "3": { + "name": "AWS EKS Key Management Service", + "id": "5.3", + "controlsIDs": [ + "C-0066" + ] + }, + "4": { + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231" + ] + }, + "5": { + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0232" + ] + }, + "6": { + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0233", + "C-0242" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Encrypt Kubernetes secrets, 
stored in etcd, using secrets encryption feature during Amazon EKS cluster creation.", + "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", + "long_description": "Kubernetes can store secrets that pods can access via a mounted volume. Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). The only requirement is to enable the encryption provider support during EKS cluster creation.\n\n Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.\n\n Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd.\n\n Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. This protects against attackers in the event that they manage to gain access to etcd.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == 
\"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ], + "manual_test": "Using the etcdctl commandline, read that secret out of etcd:\n\n 
\n```\netcdCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C\n\n```\n where [...] must be the additional arguments for connecting to the etcd server.\n\n Verify the stored secret is prefixed with k8s:enc:aescbc:v1: which indicates the aescbc provider has encrypted the resulting data.", + "references": [ + "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/" + ], + "impact_statement": "", + "default_value": "By default secrets created using the Kubernetes API are stored in *tmpfs* and are encrypted at rest." + }, + { + "name": "CIS-2.1.1 Enable audit Logs", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", + "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", + "long_description": "Audit logs enable visibility into all API server requests from authentic and anonymous sources. 
Stored log data can be analyzed manually or with tools to identify and understand anomalous or negative activity and lead to intelligent remediations.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Use approved container registries.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + }, + { + "name": "container-image-repository-v1", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", 
+ "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." + } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" + } + ], + "references": [ + "https://aws.amazon.com/blogs/opensource/using-open-policy-agent-on-amazon-eks/" + ], + "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry.", + "default_value": "" + }, + { + "controlID": "C-0167", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AWS EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0171", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. 
Verify that the ownership is set to `root:root`", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the AWS EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0172", + "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the[Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Anonymous Authentication is not enabled. 
This may be configured as a command line argument to the kubelet service with `--anonymous-auth=false` or in the kubelet configuration file via `\"authentication\": { \"anonymous\": { \"enabled\": false }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with `kubectl` on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Anonymous Authentication is not enabled checking that `\"authentication\": { \"anonymous\": { \"enabled\": false }` is in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0173", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets can be configured to allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Webhook Authentication is enabled. This may be enabled as a command line argument to the kubelet service with `--authentication-token-webhook` or in the kubelet configuration file via `\"authentication\": { \"webhook\": { \"enabled\": true } }`.\n\n Verify that the Authorization Mode is set to `WebHook`. 
This may be set as a command line argument to the kubelet service with `--authorization-mode=Webhook` or in the configuration file via `\"authorization\": { \"mode\": \"Webhook }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Webhook Authentication is enabled with `\"authentication\": { \"webhook\": { \"enabled\": true } }` in the API response.\n\n Verify that the Authorization Mode is set to `WebHook` with `\"authorization\": { \"mode\": \"Webhook }` in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Do not allow all requests. 
Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0174", + "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that a client certificate authority file is configured. This may be configured using a command line argument to the kubelet service with `--client-ca-file` or in the kubelet configuration file via `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that a client certificate authority file is configured with `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"` in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": 
{\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0175", + "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "read-only-port-enabled-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0176", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable 
timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/pull/18552" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0177", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not 
present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/" + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-protect-kernel-defaults", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the --protect-kernel-defaults argument is set to true.", + "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": 
decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0178", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. 
You might have iptables rules too restrictive or too open.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains.: true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the above command includes the argument `--make-iptables-util-chains` then verify it is set to true.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"makeIPTablesUtilChains.:true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. 
If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-ip-tables", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0179", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames 
could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/issues/22063", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-hostname-override", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --hostname-override argument is not set.", + "remediation": "Unset the --hostname-override argument.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0180", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-event-qps", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": 
[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0181", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not present or is set to true", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", + "references": [ + "https://github.com/kubernetes/kubernetes/pull/41912", + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration", + "https://kubernetes.io/docs/imported/release/notes/", + "https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "validate-kubelet-tls-configuration-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletConfiguration", + "KubeletCommandLine" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", + "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := 
input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t# get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0183", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-kubelet-server-certificate` executable argument verify that it is set to true.\n\n If the process does not have the `--rotate-kubelet-server-certificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists in the `featureGates` section and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://github.com/kubernetes/kubernetes/pull/45059", + "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-kubelet-server-certificate", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n" + } + ] + }, + { + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. 
Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "cluster-admin-role", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := 
array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := 
rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount 
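For C-0187 above (minimize wildcard use), the same least-privilege idea applies to `*` grants: enumerate only the resources and verbs a subject actually needs. A minimal sketch, with hypothetical names:

```
# Hypothetical replacement for a rule such as resources: ["*"], verbs: ["*"]:
# only Deployments in one namespace, with an explicit verb list.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: deployment-operator   # hypothetical name
  namespace: demo             # hypothetical namespace
rules:
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "list", "watch", "update", "patch"]
```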
kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-create-pod", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be 
created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "https://aws.github.io/aws-eks-best-practices/iam/#disable-auto-mounting-of-service-account-tokens" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-default-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = 
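In addition to the `kubectl patch` command given in the remediation above, the same change can be kept declaratively in source control; a minimal sketch of a `default` ServiceAccount manifest with token automount disabled (namespace is hypothetical):

```
# Declarative form of the remediation: the default ServiceAccount in this
# namespace no longer automounts its API token into pods.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
  namespace: demo   # hypothetical namespace
automountServiceAccountToken: false
```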
\"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + }, + { + "name": "namespace-without-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account 
tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- 
\nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://www.impidio.com/blog/kubernetes-rbac-security-pitfalls", + "https://raesene.github.io/blog/2020/12/12/Escalating_Away/", + "https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. 
The system:masters group also has access to bind and impersonate.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-bind-escalate", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can or bind escalate roles/clusterroles", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", 
\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "controlID": "C-0205", + "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports network policies.", + "references": [ + "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", + "https://aws.github.io/aws-eks-best-practices/network/" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None.", + "default_value": "This will depend on the CNI plugin in use.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" + } + ] + }, + { + "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", + "https://octetz.com/posts/k8s-network-policy-apis", + "https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
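A common starting point for the NetworkPolicy remediation above is a namespace-wide default deny, after which only required traffic is explicitly allowed; a minimal sketch (namespace is hypothetical):

```
# Default-deny for both ingress and egress in one namespace; follow-up
# NetworkPolicies then allow only the traffic each workload needs.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all   # hypothetical name
  namespace: demo          # hypothetical namespace
spec:
  podSelector: {}          # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress
```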
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "default_value": "By default, network policies are not created.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-secrets-in-env-var", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 
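As a minimal sketch of the mounted-file pattern the remediation above prefers over `secretKeyRef` environment variables (all names and the image are hypothetical):

```
# The application reads /etc/app-secrets/* from disk instead of the
# environment, so the value is not exposed by environment dumps or logs.
apiVersion: v1
kind: Pod
metadata:
  name: secret-as-file-demo   # hypothetical name
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0   # hypothetical image
      volumeMounts:
        - name: app-secrets
          mountPath: /etc/app-secrets
          readOnly: true
  volumes:
    - name: app-secrets
      secret:
        secretName: app-credentials   # hypothetical Secret name
```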
10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "CIS-4.6.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Amazon EKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "default_value": "By default, Kubernetes starts with two initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-public` - The namespace for public-readable ConfigMap\n4. `kube-node-lease` - The namespace for associated lease object for each node", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-namespaces", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.6.2 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. 
When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": 
[],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + }, + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath 
{\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := 
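A brief aside on the non-root-containers helpers above: when neither the container nor the pod securityContext sets runAsUser, runAsGroup or runAsNonRoot, evaluate_workload_run_as_user and evaluate_workload_run_as_group each return a single suggested fix path (runAsNonRoot=true and runAsGroup=1000), still carrying the container_ndx placeholder that get_fixed_paths later swaps for the real container index. A minimal sketch of how this could be exercised with opa test, assuming the rule above is loaded on its own next to this test module (test name and sample objects are invented for illustration):

    package armo_builtins

    # Container and pod with no securityContext at all: expect two suggested fix paths.
    test_run_as_root_defaults_yield_two_fixpaths {
        container := {"name": "demo"}
        pod := {"spec": {}}
        fixpaths := array.concat(
            evaluate_workload_run_as_user(container, pod, "spec"),
            evaluate_workload_run_as_group(container, pod, "spec"))
        count(fixpaths) == 2
    }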
containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", 
[concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, 
path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-seccomp-profile", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := 
wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-procmount-default", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": 
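The set-seccomp-profile rule above (like the set-seLinuxOptions rule before it) leans on object.get returning a default when the nested path is absent, so "not defined" reduces to comparing the lookup result against the empty-string default. A small illustrative sketch, under the same loading assumptions as the earlier snippets:

    package armo_builtins

    # No securityContext at all: the lookup falls back to "" and the check holds.
    test_seccomp_profile_not_defined_when_absent {
        seccompProfile_not_defined({"containers": []}, ["securityContext", "seccompProfile"])
    }

    # An explicit profile makes the check undefined, so the deny rule would not fire.
    test_seccomp_profile_defined {
        not seccompProfile_not_defined(
            {"securityContext": {"seccompProfile": {"type": "RuntimeDefault"}}},
            ["securityContext", "seccompProfile"])
    }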
sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" + }, + { + "name": "set-fsgroup-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := 
\"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "set-sysctls-params", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.sysctls is not set.", + "remediation": "Set securityContext.sysctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if 
securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" + }, + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation 
== false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "CIS-4.6.3 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
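Looking back at the rule-allow-privilege-escalation helper above: it distinguishes a container that leaves allowPrivilegeEscalation unset (an empty failed path plus a fix path suggesting false) from one that sets it to true (a failed/review path and no fix path), and it stays silent when a PodSecurityPolicy already forces the flag off. A sketch of the first case, mocking an input with no PSPs (test name and sample container are invented):

    package armo_builtins

    # allowPrivilegeEscalation left unset and no PSPs present: expect only a fix path.
    test_allow_priv_escalation_unset_yields_fixpath {
        container := {"name": "demo", "securityContext": {}}
        result := is_allow_privilege_escalation_container(container, 0, "spec.") with input as []
        result[0] == ""
        result[1] == {"path": "spec.containers[0].securityContext.allowPrivilegeEscalation", "value": "false"}
    }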
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "rolebinding-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := 
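All of the *-in-default-namespace rules in this control share the same is_default_namespace helper: an object explicitly placed in "default" yields a review/failed path, while an object with no namespace at all yields a fix path with a placeholder value. A short opa test sketch, assuming the pods-in-default-namespace rule above is loaded on its own next to this test module (test names and sample metadata are invented):

    package armo_builtins

    # Explicit default namespace: flagged for review, nothing to auto-fix.
    test_explicit_default_namespace_flagged {
        is_default_namespace({"name": "demo", "namespace": "default"}) == ["metadata.namespace", ""]
    }

    # Namespace omitted: a fix path with a placeholder value is suggested instead.
    test_missing_namespace_gets_fixpath {
        is_default_namespace({"name": "demo"}) == ["", {"path": "metadata.namespace", "value": "YOUR_NAMESPACE"}]
    }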
get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "role-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "configmap-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": 
\"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "endpoints-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Endpoints" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "persistentvolumeclaim-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PersistentVolumeClaim" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "podtemplate-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "replicationcontroller-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "service-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) 
= [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "serviceaccount-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "endpointslice-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "discovery.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "EndpointSlice" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "horizontalpodautoscaler-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "lease-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "csistoragecapacity-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": 
\"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ingress-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "poddisruptionbudget-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-secret-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "controlID": "C-0213", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" + ], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. 
The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "psp-deny-privileged-container", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0214", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
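[Editor's note] The PSP rules in controls C-0213 through C-0220 all follow the same two-step pattern: an `every` quantifier so the control only fails when no compliant PodSecurityPolicy exists at all, then a second pass that reports each offending PSP. A minimal sketch of that pattern, using the `spec.privileged` check from the rule above and an assumed illustrative input array:

```
package example

import future.keywords.every

# Deny only when *every* PSP in the input is privileged; then report each one.
deny[name] {
	every psp in input {
		psp.kind == "PodSecurityPolicy"
		psp.spec.privileged == true
	}

	psp := input[_]
	psp.kind == "PodSecurityPolicy"
	psp.spec.privileged == true
	name := psp.metadata.name
}

# With input = [
#   {"kind": "PodSecurityPolicy", "metadata": {"name": "a"}, "spec": {"privileged": true}},
#   {"kind": "PodSecurityPolicy", "metadata": {"name": "b"}, "spec": {"privileged": false}}
# ]
# the `every` body fails for "b", so deny is empty and the control passes.
# If both PSPs set privileged: true, deny == {"a", "b"}.
```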
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostpid", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0215", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp 
-o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostipc", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0216", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostnetwork", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + 
"description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0217", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-allowprivilegeescalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == 
\"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0218", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-root-container", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == 
\"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" + } + ] + }, + { + "controlID": "C-0219", + "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined. If a PSP is created 'allowedCapabilities' is set by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-allowed-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0220", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods with containers require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-required-drop-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return al the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n" + } + ] + }, + { + "controlID": "C-0221", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. 
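[Editor's note] Unlike the preceding PSP rules, the C-0220 rule above reports a fix path rather than a delete path, because the remediation is to insert a missing field. A small standalone sketch of that shape; the helper name `fix_for` and the sample PSP documents are illustrative assumptions:

```
package example

# A PSP with no requiredDropCapabilities gets a suggested insertion at index 0.
has_requiredDropCapabilities(spec) {
	count(spec.requiredDropCapabilities) > 0
}

fix_for(psp) = fixpath {
	not has_requiredDropCapabilities(psp.spec)
	fixpath := {"path": "spec.requiredDropCapabilities[0]", "value": "ALL"}
}

# fix_for({"spec": {}})
#   -> {"path": "spec.requiredDropCapabilities[0]", "value": "ALL"}
# fix_for({"spec": {"requiredDropCapabilities": ["ALL"]}})
#   is undefined: the PSP already drops capabilities, so nothing is reported.
```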
Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.2. Open the Amazon ECR console at.\n3. From the navigation bar, choose the Region to create your repository in.\n4. In the navigation pane, choose Repositories.\n5. On the Repositories page, choose the repository that contains the image to scan.\n6. On the Images page, select the image to scan and then choose Scan.", + "manual_test": "Please follow AWS ECS or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "If you are utilizing AWS ECR The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageErrorYou may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returnedYou may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. 
To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure-image-scanning-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "DescribeRepositories" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}" + } + ] + }, + { + "controlID": "C-0222", + "name": "CIS-5.1.2 Minimize user access to Amazon ECR", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. 
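[Editor's note] The C-0221 rule above keys off the `DescribeRepositories` object supplied by the EKS cloud provider integration. A minimal sketch of the per-repository check; the repository documents in the comments are illustrative assumptions about that data shape:

```
package example

import future.keywords.in

image_scanning_configured(repo) {
	repo.ImageScanningConfiguration.ScanOnPush == true
}

# Collect the repositories that would trigger the control.
unscanned_repos[name] {
	some repo in input.Repositories
	not image_scanning_configured(repo)
	name := repo.RepositoryName
}

# With input.Repositories = [
#   {"RepositoryName": "app",  "ImageScanningConfiguration": {"ScanOnPush": true}},
#   {"RepositoryName": "base", "ImageScanningConfiguration": {"ScanOnPush": false}}
# ]
# unscanned_repos == {"base"}, matching the fixCommand that enables scanOnPush per repository.
```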
Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-aws-policies-are-present", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "fails if aws policies are not found", + "remediation": "Implement policies to minimize user access to Amazon ECR", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has not policies to minimize access to Amazon ECR; Add some policy in order to minimize access on it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0223", + "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS. 
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", + "attributes": { + "useFromKubescapeVersion": "v2.2.5" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\t# node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy 
Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := [\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n" + } + ] + }, + { + "controlID": "C-0225", + "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
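[Editor's note] The compliance helper in the C-0223 rule above compares the statement's actions, sorted, against the exact CIS-recommended set. A standalone sketch of that comparison; the helper name `is_policy_compliant` and the sample IAM statement are illustrative assumptions:

```
package example

is_policy_compliant(stat) {
	# Allowed actions listed by the CIS recommendation, in sorted order.
	allowed_actions := ["ecr:BatchCheckLayerAvailability",
		"ecr:BatchGetImage",
		"ecr:GetAuthorizationToken",
		"ecr:GetDownloadUrlForLayer"]
	stat.Effect == "Allow"
	stat.Resource == "*"
	sort(stat.Action) == allowed_actions
}

# is_policy_compliant({
#   "Effect": "Allow",
#   "Resource": "*",
#   "Action": ["ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage",
#              "ecr:BatchCheckLayerAvailability", "ecr:GetAuthorizationToken"]
# })  -> true
# Any extra or missing action (e.g. adding "ecr:PutImage") makes the sorted lists differ,
# so the statement is treated as non-compliant and the control fails.
```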
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see list text hereEnabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", + "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-default-service-accounts-has-only-default-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"deletePaths\": [sprintf(\"subjects[%d]\", [i])],\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "automount-default-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + } + ] + }, + { + "controlID": "C-0226", + "name": "CIS-3.3.1 Prefer using a container-optimized OS when possible", + "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", + "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like locked-down firewall is configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", + "remediation": "", + "manual_test": "If a container-optimized OS is required examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", + "references": [ + "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", + "https://aws.amazon.com/bottlerocket/" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. ssh) may not be possible, with access and debugging being intended via a management tool.", + "default_value": "A container-optimized OS is not the default.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "alert-container-optimized-os-not-in-use", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". \n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. 
\n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}" + } + ] + }, + { + "controlID": "C-0227", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. 
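# --- Editor's illustrative sketch (not part of the generated framework JSON) ---
# A minimal OPA unit-test sketch for the alert-container-optimized-os-not-in-use rule
# above. Assumptions (hypothetical, not from the source): the rule is loaded in the
# same "armo_builtins" package and run with `opa test`; the node and osImage values
# are made up. A node whose osImage does not start with "Bottlerocket" should be flagged.
package armo_builtins

test_non_bottlerocket_node_is_flagged {
	count(deny) > 0 with input as [{
		"apiVersion": "v1",
		"kind": "Node",
		"metadata": {"name": "ip-10-0-0-1.ec2.internal"},
		"status": {"nodeInfo": {"osImage": "Amazon Linux 2"}}
	}]
}
# --- end of editor's sketch ---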
If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", + "default_value": "By default, Endpoint Public Access is disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-endpointprivateaccess-is-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "controlID": "C-0228", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. 
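# --- Editor's illustrative sketch (not part of the generated framework JSON) ---
# A minimal OPA unit-test sketch for the ensure-endpointprivateaccess-is-enabled rule
# above. Assumptions (hypothetical): the rule is loaded in the same "armo_builtins"
# package and run with `opa test`; the ClusterDescribe object is a trimmed-down EKS
# describe result. A cluster with EndpointPrivateAccess disabled should be flagged.
package armo_builtins

test_endpoint_private_access_disabled_is_flagged {
	count(deny) > 0 with input as [{
		"apiVersion": "eks.amazonaws.com/v1",
		"kind": "ClusterDescribe",
		"metadata": {"name": "my-cluster", "provider": "eks"},
		"data": {"Cluster": {"ResourcesVpcConfig": {"EndpointPrivateAccess": false}}}
	}]
}
# --- end of editor's sketch ---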
The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "Check for private endpoint access to the Kubernetes API server", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", + "default_value": "By default, the Public Endpoint is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n" + } + ] + }, + { + "controlID": "C-0229", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", + "manual_test": "", + "references": [], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
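# --- Editor's illustrative sketch (not part of the generated framework JSON) ---
# A minimal OPA unit-test sketch for the rule above
# (ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks).
# The two is_endpointaccess_misconfigured bodies act as a logical OR, so a cluster with
# private access enabled but public access still enabled should be flagged. Assumptions
# (hypothetical): same "armo_builtins" package, run with `opa test`, trimmed-down input.
package armo_builtins

test_public_endpoint_still_enabled_is_flagged {
	count(deny) > 0 with input as [{
		"apiVersion": "eks.amazonaws.com/v1",
		"kind": "ClusterDescribe",
		"metadata": {"name": "my-cluster", "provider": "eks"},
		"data": {"Cluster": {"ResourcesVpcConfig": {
			"EndpointPrivateAccess": true,
			"EndpointPublicAccess": true
		}}}
	}]
}
# --- end of editor's sketch ---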
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess in enabled on a private node for EKS. A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "controlID": "C-0230", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", + "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", + "remediation": "", + "manual_test": "", + "references": [], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "Network Policy requires the Network Policy add-on. 
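# --- Editor's illustrative sketch (not part of the generated framework JSON) ---
# A minimal OPA unit-test sketch for the
# ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks rule above. A cluster
# whose public endpoint is enabled and whose allowed CIDRs include 0.0.0.0/0 should be
# flagged. Assumptions (hypothetical): same "armo_builtins" package, run with
# `opa test`, trimmed-down ClusterDescribe input.
package armo_builtins

test_open_public_cidr_on_cluster_is_flagged {
	count(deny) > 0 with input as [{
		"apiVersion": "eks.amazonaws.com/v1",
		"kind": "ClusterDescribe",
		"metadata": {"name": "my-cluster", "provider": "eks"},
		"data": {"Cluster": {"ResourcesVpcConfig": {
			"EndpointPublicAccess": true,
			"PublicAccessCidrs": ["0.0.0.0/0"]
		}}}
	}]
}
# --- end of editor's sketch ---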
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-network-policy-is-enabled-eks", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0231", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "relevantCloudProviders": [ + "EKS" + ], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := \"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := \"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := 
\tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n" + } + ] + }, + { + "controlID": "C-0232", + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", + "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", + "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", + "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", + "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
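# --- Editor's illustrative sketch (not part of the generated framework JSON) ---
# A minimal OPA unit-test sketch for the ensure-https-loadbalancers-encrypted-with-tls-aws
# rule above. A LoadBalancer Service exposing port 443 without the
# service.beta.kubernetes.io/aws-load-balancer-ssl-cert annotation should be flagged.
# Assumptions (hypothetical): same "armo_builtins" package, run with `opa test`,
# made-up Service name.
package armo_builtins

test_loadbalancer_on_443_without_tls_annotation_is_flagged {
	count(deny) > 0 with input as [{
		"apiVersion": "v1",
		"kind": "Service",
		"metadata": {"name": "web-lb", "namespace": "default"},
		"spec": {"type": "LoadBalancer", "ports": [{"port": 443}]}
	}]
}
# --- end of editor's sketch ---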
If they are not they will not be able to access the namespace or deploy.", + "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "review-roles-with-aws-iam-authenticator", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0233", + "name": "CIS-5.6.1 Consider Fargate for running untrusted workloads", + "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", + "long_description": "", + "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. 
On the Configure pods selection page, enter the following information and choose Next.\n\n * list text hereFor Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "", + "default_value": "By default, AWS Fargate is not utilized.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "alert-fargate-not-in-use", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in cluster.\n# a Node is identified as using fargate if it's name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if count of all nodes equals to count of nodes_not_fargate it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}" + } + ] + }, + { + "controlID": "C-0234", + "name": "CIS-4.4.2 Consider external secret storage", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
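# --- Editor's illustrative sketch (not part of the generated framework JSON) ---
# A minimal OPA unit-test sketch for the alert-fargate-not-in-use rule above, which
# treats a node as Fargate-backed only if its name starts with "fargate". A cluster
# containing only conventionally named EC2 nodes should be flagged. Assumptions
# (hypothetical): same "armo_builtins" package, run with `opa test`, made-up node name.
package armo_builtins

test_cluster_without_fargate_nodes_is_flagged {
	count(deny) > 0 with input as [{
		"apiVersion": "v1",
		"kind": "Node",
		"metadata": {"name": "ip-192-168-1-10.ec2.internal"}
	}]
}
# --- end of editor's sketch ---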
Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "manual_test": "Review your secrets management implementation.", + "references": [], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-external-secrets-storage-is-in-use", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "relevantCloudProviders": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that doesn't support external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resources\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n" + } + ] + }, + { + "controlID": "C-0235", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 
or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0238", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. 
configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0242", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-hostile-multitenant-workloads", + "attributes": { + "actionRequired": "manual review" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "ruleDependencies": [], + "configInputs": [], + "controlConfigInputs": [], + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", + "remediation": "Use physically isolated clusters", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" + } + ] + }, + { + "controlID": "C-0246", + "name": "CIS-4.1.7 Avoid use of system:masters group", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", + "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. 
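# --- Editor's illustrative sketch (not part of the generated framework JSON) ---
# A standalone Rego sketch clarifying why the file-permission rules above compare
# against the decimal value 420: the host scanner reports permissions as a decimal
# integer, and Unix mode 644 is octal, i.e. 6*64 + 4*8 + 4 = 420. The constant and
# test names below are hypothetical and only for illustration.
package armo_builtins

octal_644_as_decimal := ((6 * 64) + (4 * 8)) + 4

test_octal_644_equals_decimal_420 {
	octal_644_as_decimal == 420
}
# --- end of editor's sketch ---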
An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", + "references": [ + "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", + "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rule-manual", + "attributes": { + "actionRequired": "manual review", + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" + } + ] + } + ], + "ControlsIDs": [ + "C-0066", + "C-0067", + "C-0078", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0205", + "C-0206", + "C-0207", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0220", + "C-0221", + "C-0222", + "C-0223", + "C-0225", + "C-0226", + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231", + "C-0232", + "C-0233", + "C-0234", + "C-0235", + "C-0238", + "C-0242", + "C-0246" + ] +} \ No newline at end of file diff --git a/releaseDev/cis-v1.23-t1.0.1.json b/releaseDev/cis-v1.23-t1.0.1.json new file mode 100644 index 000000000..8d7738c6b --- /dev/null +++ b/releaseDev/cis-v1.23-t1.0.1.json @@ -0,0 +1,8583 @@ +{ + "name": "cis-v1.23-t1.0.1", + "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", + "attributes": { + "armoBuiltin": true, + "version": "v1.0.1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "1": { + "id": "1", + "name": "Control Plane Components", + "subSections": { + "1": { + "id": "1.1", + "name": "Control Plane Node Configuration Files", + "controlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", 
+ "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112" + ] + }, + "2": { + "id": "1.2", + "name": "API Server", + "controlsIDs": [ + "C-0113", + "C-0114", + "C-0115", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0124", + "C-0125", + "C-0126", + "C-0127", + "C-0128", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0143" + ] + }, + "3": { + "id": "1.3", + "name": "Controller Manager", + "controlsIDs": [ + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150" + ] + }, + "4": { + "id": "1.4", + "name": "Scheduler", + "controlsIDs": [ + "C-0151", + "C-0152" + ] + } + } + }, + "2": { + "name": "etcd", + "id": "2", + "controlsIDs": [ + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159" + ] + }, + "3": { + "name": "Control Plane Configuration", + "id": "3", + "subSections": { + "2": { + "name": "Logging", + "id": "3.2", + "controlsIDs": [ + "C-0160", + "C-0161" + ] + } + } + }, + "4": { + "name": "Worker Nodes", + "id": "4", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "4.1", + "controlsIDs": [ + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171" + ] + }, + "2": { + "name": "Kubelet", + "id": "4.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184" + ] + } + } + }, + "5": { + "name": "Policies", + "id": "5", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "5.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191" + ] + }, + "2": { + "name": "Pod Security Standards", + "id": "5.2", + "controlsIDs": [ + "C-0192", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204" + ] + }, + "3": { + "name": "Network Policies and CNI", + "id": "5.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "name": "Secrets Management", + "id": "5.4", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "7": { + "name": "General Policies", + "id": "5.7", + "controlsIDs": [ + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "controlID": "C-0092", + "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0093", + "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0094", + "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
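# --- Editor's illustrative sketch (not part of the generated framework JSON) ---
# A minimal OPA unit-test sketch for the
# ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root rule
# above. A ControlPlaneInfo report whose kube-apiserver.yaml is owned by a non-root
# user should be flagged. Assumptions (hypothetical): same "armo_builtins" package,
# run with `opa test`, made-up ownership values.
package armo_builtins

test_non_root_owned_api_server_manifest_is_flagged {
	count(deny) > 0 with input as [{
		"apiVersion": "hostdata.kubescape.cloud/v1beta0",
		"kind": "ControlPlaneInfo",
		"metadata": {"name": "control-plane-1"},
		"data": {"APIServerInfo": {"specsFile": {
			"path": "/etc/kubernetes/manifests/kube-apiserver.yaml",
			"ownership": {"username": "ubuntu", "groupname": "ubuntu"}
		}}}
	}]
}
# --- end of editor's sketch ---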
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0095", + "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0096", + "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. 
You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0097", + "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0098", + "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` 
controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0099", + "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0100", + "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0101", + "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0102", + "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory has permissions of `755`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0103", + "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. 
It should be owned by `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```",
+        "ruleQuery": "",
+        "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"etcd\"\n\tallowed_group := \"etcd\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n"
+      }
+    ]
+  },
+  {
+    "controlID": "C-0104",
+    "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600",
+    "description": "Ensure that the `admin.conf` file has permissions of `600`.",
+    "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.",
+    "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```",
+    "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, admin.conf has permissions of `600`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0105", + "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0106", + "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0107", + "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0108", + "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0109", + "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0110", + "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": 
alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0111", + "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0112", + "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0113", + "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the API server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# 
Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0114", + "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", + "description": "Do not use token based authentication.", + "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", + "default_value": "By default, `--token-auth-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not use token based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication could not be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server TLS is not configured\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0115", + "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.",
+      "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`",
+      "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.",
+      "references": [
+        "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614"
+      ],
+      "attributes": {},
+      "baseScore": 4,
+      "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.",
+      "default_value": "By default, `DenyServiceExternalIPs` is not enabled.",
+      "category": {
+        "name": "Control plane",
+        "id": "Cat-1"
+      },
+      "scanningScope": {
+        "matches": [
+          "cluster"
+        ]
+      },
+      "rules": [
+        {
+          "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set",
+          "attributes": {},
+          "ruleLanguage": "Rego",
+          "match": [
+            {
+              "apiGroups": [
+                ""
+              ],
+              "apiVersions": [
+                "v1"
+              ],
+              "resources": [
+                "Pod"
+              ]
+            }
+          ],
+          "dynamicMatch": [],
+          "ruleDependencies": [],
+          "description": "This admission controller rejects all net-new usage of the Service field externalIPs.",
+          "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `DenyServiceExternalIPs` is not enabled.",
+          "ruleQuery": "",
+          "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0116", + "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "description": "Enable certificate based kubelet authentication.", + "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, certificate-based kubelet authentication is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable certificate based kubelet authentication.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0117", + "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "description": "Verify kubelet's certificate before establishing connection.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verify kubelet's certificate before establishing connection.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0118", + "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not always authorize all requests.", + "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Only authorized requests will be served.", + "default_value": "By default, `AlwaysAllow` is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not always authorize all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0119", + "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, `Node` authorization is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = 
result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0120", + "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", + "description": "Turn on Role Based Access Control.", + "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. It is recommended to use the RBAC authorization mode.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", + "default_value": "By default, `RBAC` authorization is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Turn on Role Based Access Control.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and 
ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0121", + "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", + "description": "Limit the rate at which the API server accepts requests.", + "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. 
Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "You need to carefully tune in limits as per your environment.", + "default_value": "By default, `EventRateLimit` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Limit the rate at which the API server accepts requests.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0122", + "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", + "description": "Do not allow all requests.", + "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and do not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. 
Its behavior was equivalent to turning off all admission controllers.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Only requests explicitly allowed by the admissions control plugins would be served.", + "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not allow all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0123", + "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", + "description": "Always pull images.", + "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. 
When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" + ], + "attributes": {}, + "baseScore": 4, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", + "default_value": "By default, `AlwaysPullImages` is not set.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Always pull images.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0124", + "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", + "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies", + "default_value": "By default, `SecurityContextDeny` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny addmission controller is not enabled. 
This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0125", + "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", + "description": "Automate service accounts management.", + "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", +      "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", +      "references": [ +        "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" +      ], +      "attributes": {}, +      "baseScore": 3, +      "impact_statement": "None.", +      "default_value": "By default, `ServiceAccount` is set.", +      "category": { +        "name": "Control plane", +        "id": "Cat-1" +      }, +      "scanningScope": { +        "matches": [ +          "cluster" +        ] +      }, +      "rules": [ +        { +          "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", +          "attributes": {}, +          "ruleLanguage": "Rego", +          "match": [ +            { +              "apiGroups": [ +                "" +              ], +              "apiVersions": [ +                "v1" +              ], +              "resources": [ +                "Pod" +              ] +            } +          ], +          "dynamicMatch": [], +          "ruleDependencies": [], +          "description": "Automate service accounts management.", +          "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", +          "ruleQuery": "", +          "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0126", + "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", + "description": "Reject creating objects in a namespace that is undergoing termination.", + "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "None", + "default_value": "By default, `NamespaceLifecycle` is set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Reject creating objects in a namespace that is undergoing termination.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0127", + "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `NodeRestriction` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, 
\"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0128", + "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", + "description": "Do not disable the secure port.", + "long_description": "The secure port is used to serve https with authentication and authorization. If you disable it, no https traffic is served and all traffic is served unencrypted.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "You need to set the API Server up with the right TLS certificates.", + "default_value": "By default, port 6443 is used as the secure port.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not disable the secure port.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
\"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0129", + "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. 
This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0130", + "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0131", + "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "description": "Retain the logs for at least 30 days or as appropriate.", + 
"long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Retain the logs for at least 30 days or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, \"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": 
sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0132", + "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "description": "Retain 10 or an appropriate number of old log files.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. For example, if you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Retain 10 or an appropriate number of old log files.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = 
result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0133", + "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", + "attributes": { + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0134", + "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", + "description": "Set global request timeout for API server requests as appropriate.", + "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--request-timeout` is set to 60 seconds.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Set global request timeout for API server requests as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0135", + "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", + "description": "Validate service account before validating token.", + "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. 
This is an example of a time-of-check to time-of-use security issue.", +      "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", +      "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", +      "references": [ +        "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" +      ], +      "attributes": {}, +      "baseScore": 6, +      "impact_statement": "None", +      "default_value": "By default, `--service-account-lookup` argument is set to `true`.", +      "category": { +        "name": "Control plane", +        "id": "Cat-1" +      }, +      "scanningScope": { +        "matches": [ +          "cluster" +        ] +      }, +      "rules": [ +        { +          "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", +          "attributes": {}, +          "ruleLanguage": "Rego", +          "match": [ +            { +              "apiGroups": [ +                "" +              ], +              "apiVersions": [ +                "v1" +              ], +              "resources": [ +                "Pod" +              ] +            } +          ], +          "dynamicMatch": [], +          "ruleDependencies": [], +          "description": "Validate service account before validating token.", +          "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", +          "ruleQuery": "", +          "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the --service-account-lookup argument is set to false\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", +          "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) 
{\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0136", + "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-key-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", + "attributes": { + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", +          "ruleQuery": "", +          "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the --service-account-key-file argument is not set\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", +          "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" +        } +      ] +    }, +    { +      "controlID": "C-0137", +      "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", +      "description": "etcd should be configured to make use of TLS encryption for client connections.", +      "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", +      "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
\"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0138", + "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0139", + "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0140", + "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client 
connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-cafile` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0141", + "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "description": "Encrypt etcd key-value store.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. 
Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `--encryption-provider-config` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Encrypt etcd key-value store.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", + 
"resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0142", + "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no encryption provider is set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n" + } + ] + }, + { + "controlID": "C-0143", + "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. 
By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_api_server(obj)\n\twanted = [\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0144", + "name": "CIS-1.3.1 Ensure that the Controller Manager 
--terminated-pod-gc-threshold argument is set as appropriate", + "description": "Activate garbage collector on pod termination, as appropriate.", + "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Activate garbage collector on pod termination, as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = {\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, 
\"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0145", + "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == 
\"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0146", + "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "description": "Use individual service account credentials for each controller.", + "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" + ], + "attributes": {}, + "baseScore": 4, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. 
If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", + "default_value": "By default, `--use-service-account-credentials` is set to false.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Use individual service account credentials for each controller.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := 
{\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0147", + "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-private-key-file` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == 
\"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0148", + "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could make them subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file.\n\n \n```\n--root-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "You need to set up and maintain the root certificate authority file.", + "default_value": "By default, `--root-ca-file` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to set up and maintain the root certificate authority file.\n\n#### Default Value\nBy
default, `--root-ca-file` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0149", + "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation on controller-manager.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addresses availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable kubelet server certificate rotation on controller-manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0150", + "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": 
\"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0151", + "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" + } + ] + }, + { + "controlID": "C-0152", + "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": 
result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" + } + ] + }, + { + "controlID": "C-0153", + "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", + "description": "Configure TLS encryption for the etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Client connections only over TLS would be served.", + "default_value": "By default, TLS encryption is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "etcd-tls-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Configure TLS encryption for the etcd service.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default Value\nBy default, TLS encryption is not set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0154", + "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", + "description": "Enable client authentication on etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", + "default_value": "By default, the etcd service can be queried by unauthenticated clients.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "etcd-client-auth-cert", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Enable client authentication on etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + 
"controlID": "C-0155", + "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", + "description": "Do not use self-signed certificates for TLS.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", + "default_value": "By default, `--auto-tls` is set to `false`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "etcd-auto-tls-disabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Do not use self-signed certificates for TLS.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. 
Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0156", + "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. 
By default, peer communication over TLS is not configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "etcd-peer-tls-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0157", + "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", + "description": "etcd should be configured for peer authentication.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", + "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. 
**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "etcd-peer-client-auth-cert", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "etcd should be configured for peer authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], 
\"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0158", + "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "etcd-peer-auto-tls-disabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. 
Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0159", + "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", + "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", + "default_value": "By default, no etcd certificate is created and used.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "etcd-unique-ca", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) 
{\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := split(command, \"=\")\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0160", + "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", + "remediation": "Create an audit policy file for your cluster.", + "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating too large volumes of log information as this could impact the availability of the cluster nodes.", + "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-native-cis", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0161", + "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", + "description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", + "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas :-\n\n * Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", + "default_value": "By default Kubernetes clusters do not log audit information.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "audit-policy-content", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n# rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit 
levels\n\tvalid_rules := [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}" + } + ] + }, + { + "controlID": "C-0162", + "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kubelet` service file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0163", + "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0164", + "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, proxy file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0165", + "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `proxy` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0166", + "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file has permissions of `600`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0167", + "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U %G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0168", + "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0169", + "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0170", + "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0171", + "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0172", + "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot 
yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0173", + "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). 
Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga 
:= {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0174", + "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := 
obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0175", + "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "read-only-port-enabled-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0176", + "name": 
"CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, 
\"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0177", + "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "By default, `--protect-kernel-defaults` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-protect-kernel-defaults", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the --protect-kernel-defaults argument is set to true.", + "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to 
true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0178", + "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. 
If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-ip-tables", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0179", + "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding 
hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", + "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", + "default_value": "By default, `--hostname-override` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-hostname-override", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --hostname-override argument is not set.", + "remediation": "Unset the --hostname-override argument.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0180", + "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. 
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "By default, `--event-qps` argument is set to `5`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-event-qps", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set the --event-qps argument to an appropriate level or, if using a config file, set the eventRecordQPS property to a value other than 0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := 
obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0181", + "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the Kubelets.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file=<path/to/tls-certificate-file> --tls-private-key-file=<path/to/tls-key-file>\nBased on your system, restart the kubelet service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "validate-kubelet-tls-configuration-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletConfiguration", + "KubeletCommandLine" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", + "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t# get yaml config equivalent\n\tnot_set_prop := 
res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0182", + "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7).", + "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet client certificate rotation is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-certificates", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --rotate-certificates argument is not set to false.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"rotateCertificates\"],\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to 
analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0183", + "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet server certificate rotation is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-kubelet-server-certificate", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n" + } + ] + }, + { + "controlID": "C-0184", + "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "kubelet-strong-cryptographics-ciphers", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", + "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [\"TLSCipherSuites\"],\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "cluster-admin-role", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", 
[j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-5.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package 
armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-5.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-create-pod", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", 
\"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": 
"automount-default-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + }, + { + "name": "namespace-without-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, 
namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := 
[service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check 
if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. 
The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-bind-escalate", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can bind or escalate roles/clusterroles", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# 
================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := 
[sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "controlID": "C-0192", + "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", + "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", + "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", + "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.", + "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "pod-security-admission-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced 
resources (validating/mutating webhooks)", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "controlID": "C-0193", + "name": "CIS-5.2.2 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of privileged containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0194", + "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", 
+ "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not 
enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0195", + "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run 
with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0196", + "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to 
true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namesapces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not 
enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0197", + "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the 
`allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running in a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on a contained process's ability to escalate privileges, within the context of the container.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-restricted-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0198", + "name": "CIS-5.2.7 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-restricted-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0199", + "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", + "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. 
With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod 
security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0200", + "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default 
set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, there are no restrictions on adding capabilities to containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-restricted-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0201", + "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilites in applications runnning on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-restricted-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0202", + "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", + "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", + "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. 
As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0203", + "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", + "description": "Do not generally admit containers which make use of `hostPath` volumes.", + "long_description": "A container which mounts a 
`hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Pods which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n    not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n    admissionwebhook := input[_]\n    admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n    admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0204", + "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", + "description": "Do not 
generally permit containers which require the use of HostPorts.", + "long_description": "Host ports connect containers directly to the host's network. This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the use of HostPorts.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", 
[admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", 
\"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0205", + "name": "CIS-5.3.1 Ensure that the CNI in use supports Network 
Policies", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "This will depend on the CNI plugin in use.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" + } + ] + }, + { + "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. 
A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", + "default_value": "By default, network policies are not created.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", + 
"controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-secrets-in-env-var", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "CIS-5.4.2 Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" + ], + "attributes": {}, + "baseScore": 5, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "external-secret-storage", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" + } + ] + }, + { + "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "default_value": "By default, Kubernetes starts with two initial namespaces: 1. 
`default` - The default namespace for objects with no other namespace 2. `kube-system` - The namespace for objects created by the Kubernetes system 3. `kube-node-lease` - Namespace used for node heartbeats 4. `kube-public` - Namespace used for public information in a cluster", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-namespaces", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", + "controlID": "C-0210", + "description": "Enable `docker/default` seccomp profile in your pod definitions.", + "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", + "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n  securityContext:\n    seccompProfile:\n      type: RuntimeDefault\n\n```", + "manual_test": "Review the pod definitions in your cluster. 
It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", + "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "set-seccomp-profile-RuntimeDefault", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile as RuntimeDefault", + "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": 
seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != \"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n" + } + ] + }, + { + "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", 
[wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": 
sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + }, + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := 
evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": 
failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + 
], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-seccomp-profile", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", 
[concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-procmount-default", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := 
input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" + }, + { + "name": "set-fsgroup-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has 
fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "set-sysctls-params", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.sysctls is not set.", + "remediation": "Set securityContext.sysctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": 
\"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" + }, + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + 
"resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} 
\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "CIS-5.7.4 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "rolebinding-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "role-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "configmap-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot 
metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "endpoints-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Endpoints" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "persistentvolumeclaim-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PersistentVolumeClaim" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "podtemplate-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "replicationcontroller-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "service-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot 
metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "serviceaccount-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "endpointslice-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "discovery.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "EndpointSlice" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "horizontalpodautoscaler-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "lease-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "csistoragecapacity-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, 
fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ingress-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "poddisruptionbudget-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-secret-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112", + "C-0113", + "C-0114", + "C-0115", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0124", + "C-0125", + "C-0126", + "C-0127", + "C-0128", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0143", + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150", + "C-0151", + "C-0152", + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159", + "C-0160", + "C-0161", + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0192", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] +} \ No newline at end of file diff --git a/releaseDev/clusterscan.json b/releaseDev/clusterscan.json new file mode 100644 index 000000000..d9ffbb2e0 --- /dev/null +++ b/releaseDev/clusterscan.json @@ -0,0 +1,1812 @@ +{ + "name": "ClusterScan", + "description": "Framework for scanning a cluster", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "security" + ], + "version": null, + "controls": [ + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "RBAC enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rbac-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package 
armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" + }, + { + "name": "rbac-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "insecure-port-flag", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. 
Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "anonymous-access-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n" + } + ] + }, + { + "controlID": "C-0265", + "name": "Authenticated user has sensitive permissions", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
This control ensures that system:authenticated users do not have permissions that put the cluster at risk.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "system-authenticated-allowed-to-take-over-cluster", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if system:authenticated user has cluster takeover RBAC permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is a group\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding are bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}" + } + ] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers who have permissions to access secrets can 
access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve the list of users, groups and service accounts that can access secrets. Use the exception mechanism to prevent repetitive notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users who have get/list/watch RBAC permissions on secrets. ", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := 
array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Roles with delete capabilities", + "attributes": { + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-excessive-delete-rights-v1", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", 
\"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. 
Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-portforward-v1", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use the exception mechanism to prevent repetitive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-validating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Validate admission controller" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns validating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-mutating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Validate admission controller" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns mutating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource) can take advantage of their privileges for malicious activities. 
This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-create-pod", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := 
[sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure_network_policy_configured_in_labels", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "description": "fails if no networkpolicy configured in workload labels", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", 
[workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Exposure to internet", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-database-without-authentication", + "categories": [ + "Initial Access" + ] + } + ] + }, + "description": "This control detects workloads that are exposed to the Internet through a Service (NodePort or LoadBalancer) or an Ingress. 
It fails if it finds workloads connected to these resources.", + "remediation": "Users should evaluate their exposed resources and apply relevant changes wherever needed.", + "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", + "controlID": "C-0256", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "exposure-to-internet", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "fails if the running workload has a bound Service or Ingress that exposes it to the Internet.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n\t\t \"reviewPaths\": failPath,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n\n # Make sure that they belong to the same namespace\n svc.metadata.namespace == ingress.metadata.namespace\n\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t\t{\n\t \"object\": ingress,\n\t\t \"reviewPaths\": result,\n\t \"failedPaths\": result,\n\t },\n\t\t{\n\t \"object\": svc,\n\t\t}\n ]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == 
wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": 
[path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := 
input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 
10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0066", + "C-0088", + "C-0067", + "C-0005", + "C-0262", + "C-0265", + "C-0015", + "C-0002", + "C-0007", + "C-0063", + "C-0036", + "C-0039", + "C-0035", + "C-0188", + "C-0187", + "C-0012", + "C-0260", + "C-0256", + "C-0038", + "C-0041", + "C-0048", + "C-0057", + "C-0013" + ] +} \ No newline at end of file diff --git a/releaseDev/controls.json b/releaseDev/controls.json new file mode 100644 index 000000000..7002ee55d --- /dev/null +++ b/releaseDev/controls.json @@ -0,0 +1,7132 @@ +[ + { + "controlID": "C-0105", + "name": "Ensure that the admin.conf file ownership is set to root:root", + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" + ], + "rulesNames": [ + "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0108", + "name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" + ], + "rulesNames": [ + "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in a Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in the cluster for the user to review", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" + ], + "attributes": {}, + "rulesNames": [ + "list-all-namespaces" + ], + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "default_value": "By default, Kubernetes starts with four initial namespaces: 1. `default` - The default namespace for objects with no other namespace 2. `kube-system` - The namespace for objects created by the Kubernetes system 3. `kube-node-lease` - Namespace used for node heartbeats 4. `kube-public` - Namespace used for public information in a cluster", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0106", + "name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" + ], + "rulesNames": [ + "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workloads with RCE vulnerabilities exposed to external traffic", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to external traffic. This control lists all images with such vulnerabilities if their pod has either a LoadBalancer or NodePort service.", + "remediation": "Either update the container image to fix the vulnerabilities (if such a fix is available) or reassess whether this workload must be exposed to outside traffic. If no fix is available, consider a periodic restart of the pod to minimize the risk of persistent intrusion. Use the exception mechanism if you don't want to see this report again.", + "rulesNames": [ + "exposed-rce-pods" + ], + "long_description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to external traffic. This control lists all images with such vulnerabilities if their pod has either a LoadBalancer or NodePort service.", + "test": "This control enumerates external facing workloads that have a LoadBalancer or NodePort service and checks the image vulnerability information for the RCE vulnerability.", + "controlID": "C-0084", + "baseScore": 8.0, + "example": "@controls/examples/c84.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rulesNames": [ + "rule-credentials-in-env-var", + "rule-credentials-configmap" + ], + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. 
Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" + ], + "attributes": {}, + "rulesNames": [ + "rule-secrets-in-env-var" + ], + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-cpu-limits" + ], + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0124", + "name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", + "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used" + ], + "baseScore": 4, + "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies", + "default_value": "By default, `SecurityContextDeny` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0216", + "name": "Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "rulesNames": [ + "psp-deny-hostnetwork" + ], + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0129", + "name": "Ensure that the API Server --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. 
It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-profiling-argument-is-set-to-false" + ], + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0111", + "name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" + ], + "rulesNames": [ + "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workload with cluster takeover roles", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Cluster Access" + ], + "displayRelatedResources": true, + "clickableResourceKind": "ServiceAccount" + } + ] + }, + "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", + "remediation": "You should apply least privilege principle. 
Make sure each service account has only the permissions that are absolutely necessary.", + "rulesNames": [ + "workload-with-cluster-takeover-roles" + ], + "long_description": "In Kubernetes, workloads with overly permissive roles pose a significant security risk. When a workload is granted roles that exceed the necessities of its operation, it creates an attack surface for privilege escalation within the cluster. This is especially critical if the roles include permissions for creating, updating, or accessing sensitive resources or secrets. An attacker exploiting such a workload can leverage these excessive privileges to perform unauthorized actions, potentially leading to a full cluster takeover. Ensuring that each service account associated with a workload is limited to permissions that are strictly necessary for its function is crucial in mitigating the risk of cluster takeovers.", + "test": "Check if the service account used by a workload has cluster takeover roles.", + "controlID": "C-0267", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0160", + "name": "Ensure that a minimal audit policy is created", + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", + "remediation": "Create an audit policy file for your cluster.", + "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" + ], + "attributes": {}, + "rulesNames": [ + "k8s-audit-logs-enabled-native-cis" + ], + "baseScore": 5, + "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating overly large volumes of log information as this could impact the availability of the cluster nodes.", + "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0199", + "name": "Minimize the admission of containers with the NET_RAW capability", + "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. 
With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-baseline-applied-1", + "pod-security-admission-baseline-applied-2" + ], + "baseScore": 6, + "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0226", + "name": "Prefer using a container-optimized OS when possible", + "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", + "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like locked-down firewall is configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", + "remediation": "", + "manual_test": "If a container-optimized OS is required examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", + "references": [ + "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", + "https://aws.amazon.com/bottlerocket/" + ], + "attributes": {}, + "rulesNames": [ + "alert-container-optimized-os-not-in-use" + ], + "baseScore": 3, + "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. 
ssh) may not be possible, with access and debugging being intended via a management tool.", + "default_value": "A container-optimized OS is not the default.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0145", + "name": "Ensure that the Controller Manager --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-controller-manager-profiling-argument-is-set-to-false" + ], + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0167", + "name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %U %G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" + ], + "rulesNames": [ + "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0151", + "name": "Ensure that the Scheduler --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-scheduler-profiling-argument-is-set-to-false" + ], + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "rulesNames": [ + "rule-privilege-escalation" + ], + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. 
Use the exception mechanism to prevent repetitive notifications.", + "rulesNames": [ + "rule-can-list-get-secrets-v1" + ], + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings, in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users who have get/list/watch RBAC permissions on secrets. ", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0159", + "name": "Ensure that a unique Certificate Authority is used for etcd", + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", + "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" + ], + "attributes": {}, + "rulesNames": [ + "etcd-unique-ca" + ], + "baseScore": 8, + "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", + "default_value": "By default, no etcd certificate is created and used.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0134", + "name": "Ensure that the API Server --request-timeout argument is set as appropriate", + "description": "Set a global request timeout for API server requests as appropriate.", + "long_description": "Setting a global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. 
By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--request-timeout` is set to 60 seconds.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0112", + "name": "Ensure that the Kubernetes PKI key file permissions are set to 600", + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" + ], + "rulesNames": [ + "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0152", + "name": "Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. 
As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1" + ], + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0122", + "name": "Ensure that the admission control plugin AlwaysAdmit is not set", + "description": "Do not allow all requests.", + "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and do not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. Its behavior was equivalent to turning off all admission controllers.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set" + ], + "baseScore": 8, + "impact_statement": "Only requests explicitly allowed by the admissions control plugins would be served.", + "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Resources memory limit and request", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ], + "actionRequired": "configuration" + }, + "description": "This control identifies all Pods for which the memory limit is not set.", + "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-memory-limit-and-request" + ], + "controlID": "C-0004", + "example": "@controls/examples/c004.yaml", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0204", + "name": "Minimize the admission of containers which use HostPorts", + "description": "Do not generally permit containers which require the use of HostPorts.", + "long_description": "Host ports connect containers directly to the host's network. 
This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-baseline-applied-1", + "pod-security-admission-baseline-applied-2" + ], + "baseScore": 4, + "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the use of HostPorts.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "rulesNames": [ + "enforce-kubelet-client-tls-authentication-updated" + ], + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0102", + "name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. 
It should not be readable or writable by any group members or the world.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" + ], + "rulesNames": [ + "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory has permissions of `755`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "rulesNames": [ + "exposed-sensitive-interfaces-v1" + ], + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0103", + "name": "Ensure that the etcd data directory ownership is set to etcd:etcd", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd" + ], + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0163", + "name": "Ensure that the kubelet service file ownership is set to root:root", + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rulesNames": [ + "exec-into-container-v1" + ], + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
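As a rough illustration of what the "kubectl exec" check (C-0002) looks for, a Role granting the `pods/exec` subresource might look like the hypothetical sketch below; the role name and namespace are assumptions.

```
# Hypothetical Role surfaced by C-0002: "create" on pods/exec is the
# permission required to run "kubectl exec" into pods in this namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: exec-access              # assumed name
  namespace: default             # assumed namespace
rules:
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create"]
```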
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0213", + "name": "Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" + ], + "attributes": {}, + "rulesNames": [ + "psp-deny-privileged-container" + ], + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. 
The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Workload with secret access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Secret Access" + ] + } + ] + }, + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rulesNames": [ + "workload-mounted-secrets" + ], + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", + "controlID": "C-0255", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0113", + "name": "Ensure that the API Server --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the API server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. 
However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false" + ], + "baseScore": 8, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0240", + "name": "Ensure Network Policy is Enabled and set as appropriate", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": {}, + "rulesNames": [ + "rule-cni-enabled-aks" + ], + "baseScore": 6, + "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0153", + "name": "Ensure that the --cert-file and --key-file arguments are set as appropriate", + "description": "Configure TLS encryption for the etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" + ], + "attributes": {}, + "rulesNames": [ + "etcd-tls-enabled" + ], + "baseScore": 8, + "impact_statement": "Client connections only over TLS would be served.", + "default_value": "By default, TLS encryption is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "rulesNames": [ + "ingress-and-egress-blocked" + ], + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. 
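Relating to the "Ingress and Egress blocked" control (C-0030) above, a minimal default-deny policy of the kind it recommends could look like the following sketch; the policy name and namespace are assumptions, not part of the control.

```
# Hypothetical default-deny NetworkPolicy: selects every pod in the namespace
# and declares both policy types with no allow rules, so all ingress and
# egress traffic is blocked until more specific policies permit it.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all         # assumed name
  namespace: my-namespace        # assumed namespace
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress
```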
Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rulesNames": [ + "rule-can-portforward-v1" + ], + "long_description": "Attackers who have relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "rulesNames": [ + "nginx-ingress-snippet-annotation-vulnerability" + ], + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the memory requests are not set.", + "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-memory-requests" + ], + "controlID": "C-0269", + "baseScore": 3.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0254", + "name": "Enable audit Logs", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. 
To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "rulesNames": [ + "rule-manual" + ], + "baseScore": 5, + "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. 
All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", + "default_value": "By default, cluster control plane logs aren't sent to be Logged.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0197", + "name": "Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. 
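As an illustrative sketch (not the control's own remediation text), a workload that satisfies C-0197 sets the flag explicitly on each container:

```
# Hypothetical pod whose container disallows privilege escalation.
apiVersion: v1
kind: Pod
metadata:
  name: no-priv-esc              # assumed name
spec:
  containers:
    - name: app
      image: busybox             # example image
      securityContext:
        allowPrivilegeEscalation: false
```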
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-restricted-applied-1", + "pod-security-admission-restricted-applied-2" + ], + "baseScore": 6, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0133", + "name": "Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set the file size to 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use the exception mechanism to prevent repetitive notifications.", + "rulesNames": [ + "list-all-validating-webhooks" + ], + "controlID": "C-0036", + "baseScore": 3.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0248", + "name": "Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "ensure-clusters-are-created-with-private-nodes" + ], + "baseScore": 8, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running pods in the cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from the default namespace there.", + "rulesNames": [ + "pods-in-default-namespace" + ], + "long_description": "It is recommended to avoid running pods in the cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
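A minimal sketch of the remediation C-0061 suggests, with assumed names: give each pod an explicit, non-default namespace.

```
# Hypothetical pod assigned to a dedicated namespace instead of "default";
# the namespace itself must be created beforehand.
apiVersion: v1
kind: Pod
metadata:
  name: payments-worker          # assumed name
  namespace: payments            # assumed, explicitly set namespace
spec:
  containers:
    - name: worker
      image: busybox             # example image
```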
This control identifies all the pods running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0196", + "name": "Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-baseline-applied-1", + "pod-security-admission-baseline-applied-2" + ], + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting a host directory into the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "rulesNames": [ + "alert-rw-hostpath" + ], + "long_description": "A hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
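To make the pattern concrete, the hypothetical pod below is what the writable-hostPath check (C-0045, rule alert-rw-hostpath) flags; per the rule's test description, marking the mount `readOnly: true` avoids the finding. Names and paths are assumptions.

```
# Hypothetical pod with a writable hostPath mount (readOnly: false).
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo            # assumed name
spec:
  containers:
    - name: app
      image: busybox             # example image
      volumeMounts:
        - name: host-dir
          mountPath: /host
          readOnly: false        # true would avoid the writable-hostPath alert
  volumes:
    - name: host-dir
      hostPath:
        path: /var/lib/data      # assumed host directory
```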
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0180", + "name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-event-qps" + ], + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "By default, `--event-qps` argument is set to `5`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0217", + "name": "Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. 
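Returning to the kubelet `--event-qps` control (C-0180) above: when the kubelet is driven by a config file rather than flags, the equivalent field is `eventRecordQPS`. The fragment below is a hedged sketch; the value shown is the documented default of 5.

```
# Hypothetical kubelet configuration fragment; eventRecordQPS limits the rate
# at which events are recorded (0 means unlimited).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
eventRecordQPS: 5
```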
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "rulesNames": [ + "psp-deny-allowprivilegeescalation" + ], + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "rulesNames": [ + "insecure-capabilities" + ], + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0130", + "name": "Ensure that the API Server --audit-log-path argument is set", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
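Relating to the "Insecure capabilities" control (C-0046) above, a common remediation pattern, shown here only as an assumed sketch rather than the control's own example file, is to drop all capabilities and add back just what the workload needs:

```
# Hypothetical container securityContext: drop every capability, then re-add
# a single narrow one only if the application actually requires it.
apiVersion: v1
kind: Pod
metadata:
  name: minimal-caps             # assumed name
spec:
  containers:
    - name: app
      image: busybox             # example image
      securityContext:
        capabilities:
          drop: ["ALL"]
          add: ["NET_BIND_SERVICE"]   # assumed; only if binding ports below 1024
```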
You can enable it by setting an appropriate audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-audit-log-path-argument-is-set" + ], + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0127", + "name": "Ensure that the admission control plugin NodeRestriction is set", + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-NodeRestriction-is-set" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `NodeRestriction` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Ensure that the seccomp profile is set to docker/default in your pod definitions", + "controlID": "C-0210", + "description": "Enable `docker/default` seccomp profile in your pod definitions.", + "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", + "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "manual_test": "Review the pod definitions in your cluster. 
It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" + ], + "attributes": {}, + "rulesNames": [ + "set-seccomp-profile-RuntimeDefault" + ], + "baseScore": 4, + "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", + "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0176", + "name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-streaming-connection-idle-timeout" + ], + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0110", + "name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" + ], + "rulesNames": [ + "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-3172-aggregated-API-server-redirect", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [] + }, + "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "rulesNames": [ + "CVE-2022-3172" + ], + "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", + "controlID": "C-0089", + "baseScore": 3.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Outdated Kubernetes version", + "attributes": {}, + "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", + "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", + "rulesNames": [ + "outdated-k8s-version" + ], + "long_description": "Running an outdated version of Kubernetes poses significant security risks and operational challenges. Older versions may contain unpatched vulnerabilities, leading to potential security breaches and unauthorized access. Additionally, outdated clusters might not support newer, more secure, and efficient features, impacting both performance and security. 
Regularly updating Kubernetes ensures compliance with the latest security standards and access to enhanced functionalities.", + "test": "Verifies the current Kubernetes version against the latest stable releases.", + "controlID": "C-0273", + "baseScore": 2.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CoreDNS poisoning", + "attributes": { + "microsoftMitreColumns": [ + "Lateral Movement" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", + "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", + "rulesNames": [ + "rule-can-update-configmap-v1" + ], + "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by the Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located in the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", + "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", + "controlID": "C-0037", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows the user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "rulesNames": [ + "container-image-repository", + "container-image-repository-v1" + ], + "long_description": "If attackers get access to the cluster, they can re-point Kubernetes to a compromised container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. 
User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0225", + "name": "Prefer using dedicated EKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", + "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" + ], + "attributes": {}, + "rulesNames": [ + "ensure-default-service-accounts-has-only-default-roles", + "automount-default-service-account" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0123", + "name": "Ensure that the admission control plugin AlwaysPullImages is set", + "description": "Always pull images.", + "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set" + ], + "baseScore": 4, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
This setting is not appropriate for clusters which use this configuration.", + "default_value": "By default, `AlwaysPullImages` is not set.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0227", + "name": "Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. 
For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": {}, + "rulesNames": [ + "ensure-endpointprivateaccess-is-enabled" + ], + "baseScore": 8.0, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", + "default_value": "By default, Endpoint Public Access is disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0198", + "name": "Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-restricted-applied-1", + "pod-security-admission-restricted-applied-2" + ], + "baseScore": 6, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0119", + "name": "Ensure that the API Server --authorization-mode argument includes Node", + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", + "references": [ + 
"https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-authorization-mode-argument-includes-Node" + ], + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, `Node` authorization is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0251", + "name": "Minimize user access to Azure Container Registry (ACR)", + "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "list-role-definitions-in-acr" + ], + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "rulesNames": [ + "container-hostPort" + ], + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0238", + "name": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": {}, + "rulesNames": [ + "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive" + ], + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0118", + "name": "Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not always authorize all requests.", + "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow" + ], + "baseScore": 7, + "impact_statement": "Only authorized requests will be served.", + "default_value": "By default, `AlwaysAllow` is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0233", + "name": "Consider Fargate for running untrusted workloads", + "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", + "long_description": "", + "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * For Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. 
For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" + ], + "attributes": {}, + "rulesNames": [ + "alert-fargate-not-in-use" + ], + "baseScore": 3, + "impact_statement": "", + "default_value": "By default, AWS Fargate is not utilized.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Instance Metadata API", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "rulesNames": [ + "instance-metadata-api-access" + ], + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "rulesNames": [ + "rule-list-all-cluster-admins-v1" + ], + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0136", + "name": "Ensure that the API Server --service-account-key-file argument is set as appropriate", + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate" + ], + "baseScore": 5, + "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-key-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0115", + "name": "Ensure that the API Server --DenyServiceExternalIPs is not set", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set" + ], + "baseScore": 4, + "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", + "default_value": "By default, the `DenyServiceExternalIPs` admission plugin is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workload with ConfigMap access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rulesNames": [ + "workload-mounted-configmap" + ], + "test": "Check if any workload has mounted ConfigMaps by inspecting their specifications and verifying if configMap volumes are defined", + "controlID": "C-0258", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0245", + "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "encrypt-traffic-to-https-load-balancers-with-tls-certificates" + ], + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0235", + "name": "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive" + ], + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0144", + "name": "Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", + "description": "Activate garbage collector on pod termination, as appropriate.", + "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. 
Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0107", + "name": "Ensure that the scheduler.conf file ownership is set to root:root", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" + ], + "rulesNames": [ + "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" + ], + "attributes": {}, + "rulesNames": [ + "pods-in-default-namespace", + "rolebinding-in-default-namespace", + "role-in-default-namespace", + "configmap-in-default-namespace", + "endpoints-in-default-namespace", + "persistentvolumeclaim-in-default-namespace", + "podtemplate-in-default-namespace", + "replicationcontroller-in-default-namespace", + "service-in-default-namespace", + "serviceaccount-in-default-namespace", + "endpointslice-in-default-namespace", + "horizontalpodautoscaler-in-default-namespace", + "lease-in-default-namespace", + "csistoragecapacity-in-default-namespace", + "ingress-in-default-namespace", + "poddisruptionbudget-in-default-namespace", + "resources-secret-in-default-namespace" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Forbidden Container Registries", + "attributes": { + "microsoftMitreColumns": [ + "Initial Access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "actionRequired": "configuration" + }, + "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), a compromised cloud credential can lead to cluster takeover. Attackers may abuse cloud account credentials or the IAM mechanism to gain access to the cluster\u2019s management layer.", + "remediation": "Limit the registries from which you pull container images", + "rulesNames": [ + "rule-identify-blocklisted-image-registries", + "rule-identify-blocklisted-image-registries-v1" + ], + "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. 
Building images based on untrusted base images can also lead to similar results.", + "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", + "controlID": "C-0001", + "baseScore": 7.0, + "example": "@controls/examples/c001.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0262", + "name": "Anonymous user has RoleBinding", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": {}, + "rulesNames": [ + "anonymous-access-enabled" + ], + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0214", + "name": "Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostPID is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "rulesNames": [ + "psp-deny-hostpid" + ], + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0128", + "name": "Ensure that the API Server --secure-port argument is not set to 0", + "description": "Do not disable the secure port.", + "long_description": "The secure port is used to serve https with authentication and authorization. 
If you disable it, no https traffic is served and all traffic is served unencrypted.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0" + ], + "baseScore": 8, + "impact_statement": "You need to set the API Server up with the right TLS certificates.", + "default_value": "By default, port 6443 is used as the secure port.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0093", + "name": "Ensure that the API server pod specification file ownership is set to root:root", + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" + ], + "rulesNames": [ + "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Naked pods", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of pods may lead to configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", + "remediation": "Create the necessary Deployment object for every pod, making every pod a first-class citizen in your IaC architecture.", + "rulesNames": [ + "naked-pods" + ], + "long_description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of pods may lead to configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. 
This control identifies every pod that does not have a corresponding parental object.", + "test": "Test if pods are not associated with a Deployment, ReplicaSet, etc. If not, fail.", + "controlID": "C-0073", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0104", + "name": "Ensure that the admin.conf file permissions are set to 600", + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains a private key and the respective certificate that allow full management of the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" + ], + "rulesNames": [ + "ensure-that-the-admin.conf-file-permissions-are-set-to-600" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, admin.conf has permissions of `600`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "rulesNames": [ + "anonymous-requests-to-kubelet-service-updated" + ], + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. 
If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Ensure that default service accounts are not actively used", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" + ], + "attributes": {}, + "rulesNames": [ + "automount-default-service-account", + "namespace-without-service-account" + ], + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0109", + "name": "Ensure that the controller-manager.conf file ownership is set to root:root", + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" + ], + "rulesNames": [ + "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0229", + "name": "Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", + "manual_test": "", + "references": [], + "attributes": {}, + "rulesNames": [ + "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks" + ], + "baseScore": 8.0, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" + ], + "attributes": {}, + "rulesNames": [ + "rule-can-create-pod" + ], + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller 
clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0173", + "name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-authorization-mode-alwaysAllow" + ], + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0141", + "name": "Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "description": "Encrypt etcd key-value store.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate" + ], + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `--encryption-provider-config` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0161", + "name": "Ensure that the audit policy covers key security concerns", + "description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", + "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas :-\n\n * Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" + ], + "attributes": {}, + "rulesNames": [ + "audit-policy-content" + ], + "baseScore": 5, + "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", + "default_value": "By default Kubernetes clusters do not log audit information.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0234", + "name": "Consider external secret storage", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "manual_test": "Review your secrets management implementation.", + "references": [], + "attributes": {}, + "rulesNames": [ + "ensure-external-secrets-storage-is-in-use" + ], + "baseScore": 6.0, + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0100", + "name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0203", + "name": "Minimize the admission of HostPath volumes", + "description": "Do not generally admit containers which make use of `hostPath` volumes.", + "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node.
The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-baseline-applied-1", + "pod-security-admission-baseline-applied-2" + ], + "baseScore": 6, + "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0157", + "name": "Ensure that the --peer-client-cert-auth argument is set to true", + "description": "etcd should be configured for peer authentication.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", + "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" + ], + "attributes": {}, + "rulesNames": [ + "etcd-peer-client-auth-cert" + ], + "baseScore": 7, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.
By default, `--peer-client-cert-auth` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0094", + "name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" + ], + "rulesNames": [ + "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0181", + "name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the Kubelets.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" + ], + "attributes": {}, + "rulesNames": [ + "validate-kubelet-tls-configuration-updated" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0183", + "name": "Verify that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-rotate-kubelet-server-certificate" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet server certificate rotation is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0168", + "name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file.
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" + ], + "rulesNames": [ + "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0193", + "name": "Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-baseline-applied-1", + "pod-security-admission-baseline-applied-2" + ], + "baseScore": 8, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of privileged containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "rulesNames": [ + "rule-allow-privilege-escalation" + ], + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false.
", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "rulesNames": [ + "CVE-2022-23648" + ], + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Exposure to internet", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-database-without-authentication", + "categories": [ + "Initial Access" + ] + } + ] + }, + "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "rulesNames": [ + "exposure-to-internet" + ], + "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", + "controlID": "C-0256", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0099", + "name": "Ensure that the etcd pod specification file ownership is set to root:root", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" + ], + "rulesNames": [ + "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0241", + "name": "Use Azure RBAC for Kubernetes Authorization.", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", + "remediation": "Set Azure RBAC as access system.", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "ensure-azure-rbac-is-set" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "rulesNames": [ + "rule-deny-cronjobs" + ], + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0165", + "name": "If proxy kubeconfig file exists ensure ownership is set to root:root", + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" + ], + "rulesNames": [ + "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `proxy` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0121", + "name": "Ensure that the admission control plugin EventRateLimit is set", + "description": "Limit the rate at which the API server accepts requests.", + "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. 
Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-EventRateLimit-is-set" + ], + "baseScore": 4, + "impact_statement": "You need to carefully tune the limits as per your environment.", + "default_value": "By default, `EventRateLimit` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0095", + "name": "Ensure that the controller manager pod specification file ownership is set to root:root", + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" + ], + "rulesNames": [ + "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "rulesNames": [ + "linux-hardening" + ], + "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default.
", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0182", + "name": "Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also require the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", + "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-rotate-certificates" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet client certificate rotation is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. 
This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", + "rulesNames": [ + "image-pull-policy-is-not-set-to-always" + ], + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0220", + "name": "Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": {}, + "rulesNames": [ + "psp-required-drop-capabilities" + ], + "baseScore": 5.0, + "impact_statement": "Pods with containers that require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0155", + "name": "Ensure that the --auto-tls argument is not set to true", + "description": "Do not use self-signed certificates for TLS.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects.
These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable client authentication via valid certificates to secure access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" + ], + "attributes": {}, + "rulesNames": [ + "etcd-auto-tls-disabled" + ], + "baseScore": 6, + "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", + "default_value": "By default, `--auto-tls` is set to `false`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "This control detects workloads that have no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have the necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "rulesNames": [ + "ensure_network_policy_configured_in_labels" + ], + "test": "Check that all workloads have a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ingress uses TLS", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control detects Ingress resources that do not use TLS", + "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", + "rulesNames": [ + "ingress-no-tls" + ], + "test": "Check if the Ingress resource has TLS configured", + "controlID": "C-0263", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0162", + "name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node.
For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kubelet` service file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0228", + "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "Check for private endpoint access to the Kubernetes API server", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": {}, + "rulesNames": [ + "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks" + ], + "baseScore": 8.0, + "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled.
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", + "default_value": "By default, the Public Endpoint is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0201", + "name": "Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-restricted-applied-1", + "pod-security-admission-restricted-applied-2" + ], + "baseScore": 5, + "impact_statement": "Pods with containers that require capabilities to operate will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0247", + "name": "Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises.
Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "restrict-access-to-the-control-plane-endpoint" + ], + "baseScore": 8, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", + "default_value": "By default, Endpoint Private Access is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0131", + "name": "Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "description": "Retain the logs for at least 30 days or as appropriate.", + "long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "SSH server running inside container", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "rulesNames": [ + "rule-can-ssh-to-pod-v1" + ], + "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if service connected to some workload has an SSH port (22/2222). 
If so we raise an alert. ", + "controlID": "C-0042", + "baseScore": 3.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0184", + "name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-strong-cryptographics-ciphers" + ], + "baseScore": 5, + "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. 
When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields are set according to recommendations in the CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "rulesNames": [ + "rule-privilege-escalation", + "immutable-container-filesystem", + "non-root-containers", + "drop-capability-netraw", + "set-seLinuxOptions", + "set-seccomp-profile", + "set-procmount-default", + "set-fsgroup-value", + "set-fsgroupchangepolicy-value", + "set-sysctls-params", + "set-supplementalgroups-values", + "rule-allow-privilege-escalation" + ], + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, as it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "rulesNames": [ + "k8s-audit-logs-enabled-cloud", + "k8s-audit-logs-enabled-native" + ], + "long_description": "Audit logging is an important security feature in Kubernetes, as it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events that happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd; therefore, it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster; for more information, see the vendor documentation.", + "rulesNames": [ + "secret-etcd-encryption-cloud", + "etcd-encryption-native" + ], + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0265", + "name": "system:authenticated user has elevated roles", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have permissions that put the cluster at risk.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated has minimal permissions.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to the system:authenticated group.", + "attributes": {}, + "rulesNames": [ + "system-authenticated-allowed-to-take-over-cluster" + ], + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0244", + "name": "Ensure Kubernetes Secrets are encrypted", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. 
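For self-managed clusters, the native check above reads the API server configuration. One common way to enable secret encryption there is an encryption provider configuration referenced via the kube-apiserver `--encryption-provider-config` flag; a minimal sketch is shown below (the key material is a placeholder and must never be committed to source control):

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets                     # encrypt Secret objects at rest in etcd
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: "<base64-encoded 32-byte key>"   # placeholder value
      - identity: {}                # fallback for reading not-yet-encrypted data
```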
Additionally, organizations have various options to closely manage encryption or encryption keys.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "secret-etcd-encryption-cloud" + ], + "baseScore": 6, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "rulesNames": [ + "insecure-port-flag" + ], + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0179", + "name": "Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", + "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-hostname-override" + ], + "baseScore": 3, + "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. 
In these environments, this recommendation should not apply.", + "default_value": "By default, `--hostname-override` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0140", + "name": "Ensure that the API Server --etcd-cafile argument is set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-cafile` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Roles with delete capabilities", + "attributes": { + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "rulesNames": [ + "rule-excessive-delete-rights-v1" + ], + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0221", + "name": "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. 
Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.2. Open the Amazon ECR console at.\n3. From the navigation bar, choose the Region to create your repository in.\n4. In the navigation pane, choose Repositories.\n5. On the Repositories page, choose the repository that contains the image to scan.\n6. On the Images page, select the image to scan and then choose Scan.", + "manual_test": "Please follow AWS ECS or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" + ], + "attributes": {}, + "rulesNames": [ + "ensure-image-scanning-enabled-cloud" + ], + "baseScore": 5, + "impact_statement": "If you are utilizing AWS ECR The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageErrorYou may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returnedYou may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0222", + "name": "Minimize user access to Amazon ECR", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. 
To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. 
You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" + ], + "attributes": {}, + "rulesNames": [ + "ensure-aws-policies-are-present" + ], + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0147", + "name": "Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate" + ], + "baseScore": 6, + "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-private-key-file` it not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-39328-grafana-auth-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", + "remediation": "Update your Grafana to 9.2.4 or above", + "rulesNames": [ + "CVE-2022-39328" + ], + "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. 
The CVSS score for this vulnerability is 9.8 Critical.", + "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", + "controlID": "C-0090", + "baseScore": 9.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0195", + "name": "Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-baseline-applied-1", + "pod-security-admission-baseline-applied-2" + ], + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0120", + "name": "Ensure that the API Server --authorization-mode argument includes RBAC", + "description": "Turn on Role Based Access Control.", + "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. 
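On kubeadm-style clusters, the flag discussed in the remediation that follows lives in the kube-apiserver static pod manifest; a trimmed, illustrative excerpt (image tag is a placeholder, other flags omitted):

```yaml
# Excerpt of /etc/kubernetes/manifests/kube-apiserver.yaml (illustrative only)
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
    - name: kube-apiserver
      image: registry.k8s.io/kube-apiserver:v1.28.0   # placeholder version
      command:
        - kube-apiserver
        - --authorization-mode=Node,RBAC              # include RBAC here
```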
It is recommended to use the RBAC authorization mode.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC" + ], + "baseScore": 8, + "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", + "default_value": "By default, `RBAC` authorization is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" + ], + "attributes": {}, + "rulesNames": [ + "rule-can-bind-escalate", + "rule-can-impersonate-users-groups-v1" + ], + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0205", + "name": "Ensure that the CNI in use supports Network Policies", + "description": "There are a variety of CNI plugins available for Kubernetes. 
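Beyond reviewing the plugin documentation, a quick empirical check is to apply a default-deny policy in a disposable namespace and confirm that traffic is actually blocked; if the CNI silently ignores it, NetworkPolicy is not enforced. A minimal sketch (names are placeholders):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all        # illustrative name
  namespace: policy-test        # use a disposable test namespace
spec:
  podSelector: {}               # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress
```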
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-cni-in-use-supports-network-policies" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "This will depend on the CNI plugin in use.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0232", + "name": "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", + "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", + "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", + "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", + "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" + ], + "attributes": {}, + "rulesNames": [ + "review-roles-with-aws-iam-authenticator" + ], + "baseScore": 7, + "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not they will not be able to access the namespace or deploy.", + "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0246", + "name": "Avoid use of system:masters group", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", + "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", + "references": [ + "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" + ], + "attributes": {}, + "rulesNames": [ + "rule-manual" + ], + "baseScore": 8, + "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", + "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "rulesNames": [ + "host-network-access" + ], + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. 
Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" + ], + "attributes": {}, + "rulesNames": [ + "external-secret-storage" + ], + "baseScore": 5, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "rulesNames": [ + "immutable-container-filesystem" + ], + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. 
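As an illustration of the lower-privileged binding the remediation below recommends creating before removing a cluster-admin binding, a namespaced RoleBinding to the built-in `edit` ClusterRole often suffices for day-to-day operators; the subject and namespace here are placeholders:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: dev-team-edit           # illustrative name
  namespace: team-a             # placeholder namespace
subjects:
  - kind: Group
    name: dev-team              # placeholder group
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: edit                    # built-in role, scoped to the namespace via RoleBinding
  apiGroup: rbac.authorization.k8s.io
```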
When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" + ], + "attributes": {}, + "rulesNames": [ + "cluster-admin-role" + ], + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0172", + "name": "Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. 
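When the config-file route from the remediation above is used, the relevant kubelet settings are plain YAML. An illustrative excerpt, assuming the common kubeadm file path; the same file can also carry the client CA and read-only-port settings that appear in later controls in this framework:

```yaml
# Illustrative excerpt of a kubelet config file (e.g. /var/lib/kubelet/config.yaml)
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false                 # reject anonymous requests
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt   # placeholder path, see the client CA control
readOnlyPort: 0                    # disable the unauthenticated read-only port
```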
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" + ], + "attributes": {}, + "rulesNames": [ + "anonymous-requests-to-kubelet-service-updated" + ], + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0218", + "name": "Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" + ], + "attributes": {}, + "rulesNames": [ + "psp-deny-root-container" + ], + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
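A workload that passes this control carries the recommended `app.kubernetes.io/*` labels on both the object and its pod template; an illustrative sketch with placeholder names and values:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web                                  # illustrative workload
  labels:
    app.kubernetes.io/name: web
    app.kubernetes.io/instance: web-prod     # placeholder values
    app.kubernetes.io/part-of: storefront
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: web
  template:
    metadata:
      labels:
        app.kubernetes.io/name: web
    spec:
      containers:
        - name: web
          image: registry.example.com/web:1.2   # placeholder image
```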
This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "rulesNames": [ + "k8s-common-labels-usage" + ], + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "test": "This test checks whether labels that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0200", + "name": "Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` from being set to anything other than an empty array.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-restricted-applied-1", + "pod-security-admission-restricted-applied-2" + ], + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, there are no restrictions on adding capabilities to containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure CPU requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU requests are not set.", + "remediation": "Set the CPU requests or use the exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-cpu-requests" + ], + "controlID": "C-0268", + "baseScore": 3.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0174", + "name": "Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used 
for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" + ], + "attributes": {}, + "rulesNames": [ + "enforce-kubelet-client-tls-authentication-updated" + ], + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0242", + "name": "Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "rule-hostile-multitenant-workloads" + ], + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "ServiceAccount token mounted", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. 
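An illustrative way to apply the recommendation above is to disable automounting on the ServiceAccount and, for defense in depth, on the pod spec as well; the names and namespace here are placeholders:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                           # illustrative name
  namespace: team-a                      # placeholder namespace
automountServiceAccountToken: false
---
apiVersion: v1
kind: Pod
metadata:
  name: app
  namespace: team-a
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false    # pod-level setting takes precedence
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder image
```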
Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", + "rulesNames": [ + "serviceaccount-token-mount" + ], + "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", + "controlID": "C-0261", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0098", + "name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" + ], + "rulesNames": [ + "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0135", + "name": "Ensure that the API Server --service-account-lookup argument is set to true", + "description": "Validate service account before validating token.", + "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. 
This is an example of a time-of-check to time-of-use security issue.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `--service-account-lookup` argument is set to `true`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0171", + "name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" + ], + "rulesNames": [ + "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0170", + "name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" + ], + "rulesNames": [ + "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0175", + "name": "Verify that the --read-only-port argument is set to 0", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" + ], + "attributes": {}, + "rulesNames": [ + "read-only-port-enabled-updated" + ], + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "rulesNames": [ + "sudo-in-container-entrypoint" + ], + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "rulesNames": [ + "automount-service-account" + ], + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0154", + "name": "Ensure that the --client-cert-auth argument is set to true", + "description": "Enable client authentication on etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" + ], + "attributes": {}, + "rulesNames": [ + "etcd-client-auth-cert" + ], + "baseScore": 8, + "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", + "default_value": "By default, the etcd service can be queried by unauthenticated clients.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0101", + "name": "Ensure that the Container Network Interface file ownership is set to root:root", + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. 
Those files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Resource limits", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/pod manifests.", + "rulesNames": [ + "resource-policies" + ], + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0117", + "name": "Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "description": "Verify kubelet's certificate before establishing connection.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" + ], + "attributes": {}, + "rulesNames": [ + "internal-networking" + ], + "baseScore": 4, + "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", + "default_value": "By default, network policies are not created.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0252", + "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. 
Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network, then create a link between your virtual network and a new private DNS zone.", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": {}, + "rulesNames": [ + "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled" + ], + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0137", + "name": "Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0178", + "name": "Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pod networking configuration. 
Manually configuring iptables with dynamic pod network configuration changes might hamper communication between pods/containers and with the outside world. You might end up with iptables rules that are too restrictive or too open.", + "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-ip-tables" + ], + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0166", + "name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" + ], + "rulesNames": [ + "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file has permissions of `600`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workloads with Critical vulnerabilities exposed to external traffic", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "Container images with known critical vulnerabilities pose an elevated risk if they are exposed to external traffic. 
This control lists all images with such vulnerabilities if either a LoadBalancer or a NodePort service is assigned to them.", + "remediation": "Either update the container image to fix the vulnerabilities (if such a fix is available) or reassess if this workload must be exposed to the outside traffic. If no fix is available, consider periodically restarting the pod to minimize the risk of persistent intrusion. Use the exception mechanism if you don't want to see this report again.", + "rulesNames": [ + "exposed-critical-pods" + ], + "long_description": "Container images with known critical vulnerabilities pose an elevated risk if they are exposed to external traffic. This control lists all images with such vulnerabilities if either a LoadBalancer or a NodePort service is assigned to them.", + "test": "This control enumerates external-facing workloads that have LoadBalancer or NodePort services and checks image vulnerability information to see if the image has critical vulnerabilities.", + "controlID": "C-0083", + "baseScore": 8.0, + "example": "@controls/examples/c83.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0146", + "name": "Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "description": "Use individual service account credentials for each controller.", + "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true" + ], + "baseScore": 4, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. 
If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.", + "default_value": "By default, `--use-service-account-credentials` is set to false.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0192", + "name": "Ensure that the cluster has at least one active policy control mechanism in place", + "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", + "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", + "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-applied-1", + "pod-security-admission-applied-2" + ], + "baseScore": 4, + "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.", + "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with credential access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rulesNames": [ + "rule-credentials-in-env-var" + ], + "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", + "controlID": "C-0259", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0148", + "name": "Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could be a subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate" + ], + "baseScore": 7, + "impact_statement": "You need to setup and maintain root certificate authority file.", + "default_value": "By default, `--root-ca-file` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0239", + "name": "Prefer using dedicated AKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. 
Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. 
Webhook token authentication is configured and managed as part of the AKS cluster.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "ensure-default-service-accounts-has-only-default-roles" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0236", + "name": "Verify image signature", + "description": "Verifies the signature of each image with given public keys", + "long_description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "manual_test": "", + "references": [], + "attributes": { + "actionRequired": "configuration" + }, + "rulesNames": [ + "verify-image-signature" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0215", + "name": "Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "rulesNames": [ + "psp-deny-hostipc" + ], + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "RBAC enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "rulesNames": [ + "rbac-enabled-cloud", + "rbac-enabled-native" + ], + "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", 
+ "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workloads with excessive amount of vulnerabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Container images with multiple Critical and High sevirity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threashold provided by the customer.", + "remediation": "Update your workload images as soon as possible when fixes become available.", + "rulesNames": [ + "excessive_amount_of_vulnerabilities_pods" + ], + "long_description": "Container images with multiple Critical and High sevirity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threashold provided by the customer.", + "test": "This control enumerates workloads and checks if they have excessive amount of vulnerabilities in their container images. The threshold of \u201cexcessive number\u201d is configurable.", + "controlID": "C-0085", + "baseScore": 6.0, + "example": "@controls/examples/c85.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0243", + "name": "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "rulesNames": [ + "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider" + ], + "baseScore": 5, + "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. 
Or, a network issue might prevent you from connecting to the registry.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0096", + "name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" + ], + "rulesNames": [ + "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rulesNames": [ + "workload-mounted-pvc" + ], + "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", + "controlID": "C-0257", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "rules": [] + }, + { + "controlID": "C-0194", + "name": "Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-baseline-applied-1", + "pod-security-admission-baseline-applied-2" + ], + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" + ], + "attributes": {}, + "rulesNames": [ + "automount-service-account" + ], + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "rulesNames": [ + "host-pid-ipc-privileges" + ], + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0116", + "name": "Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "description": "Enable certificate based kubelet authentication.", + "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate" + ], + "baseScore": 7, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, certificate-based kubelet authentication is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use the exception mechanism to prevent repetitive notifications.", + "rulesNames": [ + "list-all-mutating-webhooks" + ], + "controlID": "C-0039", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0219", + "name": "Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": {}, + "rulesNames": [ + "psp-deny-allowed-capabilities" + ], + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outside the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined. If a PSP is created 'allowedCapabilities' is set by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0125", + "name": "Ensure that the admission control plugin ServiceAccount is set", + "description": "Automate service account management.", + "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-ServiceAccount-is-set" + ], + "baseScore": 3, + "impact_statement": "None.", + "default_value": "By default, `ServiceAccount` is set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect the cluster network.", + "rulesNames": [ + "internal-networking" + ], + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with administrative roles", + "attributes": {}, + "description": "This control identifies workloads where the associated service accounts have roles that grant administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", + "remediation": "You should apply the least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", + "rulesNames": [ + "workload-with-administrative-roles" + ], + "long_description": "In Kubernetes environments, workloads granted administrative-level privileges without restrictions represent a critical security vulnerability. When a service account associated with a workload is configured with permissions to perform any action on any resource, it essentially holds unrestricted access within the cluster, akin to cluster admin privileges. This configuration dramatically increases the risk of security breaches, including data theft, unauthorized modifications, and potentially full cluster takeovers. Such privileges allow attackers to exploit the workload for wide-ranging malicious activities, bypassing the principle of least privilege. 
Therefore, it's essential to follow the least privilege principle and make sure cluster admin permissions are granted only when it is absolutely necessary.", + "test": "Check if the service account used by a workload has cluster admin roles, either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges.", + "controlID": "C-0272", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0249", + "name": "Restrict untrusted workloads", + "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster and presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI with no delay as the cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", + "long_description": "It is best practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "actionRequired": "manual review" + }, + "rulesNames": [ + "rule-manual" + ], + "baseScore": 5, + "impact_statement": "", + "default_value": "ACI is not a default component of AKS", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting a host directory into a container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "rulesNames": [ + "alert-any-hostpath" + ], + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0114", + "name": "Ensure that the API Server --token-auth-file parameter is not set", + "description": "Do not use token based authentication.", + "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-token-auth-file-parameter-is-not-set" + ], + "baseScore": 8, + "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", + "default_value": "By default, `--token-auth-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "rulesNames": [ + "psp-enabled-cloud", + "psp-enabled-native" + ], + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is important to use PSP to control the creation of sensitive pods in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s, and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0143", + "name": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers" + ], + "baseScore": 5, + "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9, v2.2.4 or v2.3.0)", + "rulesNames": [ + "CVE-2022-24348" + ], + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform.
This in turn can lead to privilege escalation, lateral movements and information disclosure.", + "test": "Checking the Argo CD version running in the cluster; if it is earlier than the fixed versions (v2.1.9, v2.2.4 or v2.3.0) an alert is fired", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can allow attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "rulesNames": [ + "CVE-2022-0185" + ], + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0250", + "name": "Minimize cluster access to read-only for Azure Container Registry (ACR)", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "rulesNames": [ + "ensure-service-principle-has-read-only-permissions" + ], + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno; it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Kyverno to 1.8.5 or above", + "rulesNames": [ + "CVE-2022-47633" + ], + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno; it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images.
The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification step. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", + "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured readiness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "A readiness probe is intended to ensure that the workload is ready to process network traffic. It is highly recommended to define a readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "rulesNames": [ + "configured-readiness-probe" + ], + "long_description": "A readiness probe is intended to ensure that the workload is ready to process network traffic. It is highly recommended to define a readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "description": "Mounting the container runtime socket (Unix socket) enables a container to access the container runtime, retrieve sensitive information and execute commands, if the container runtime is available. This control identifies pods that attempt to mount the container runtime socket for accessing the container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "rulesNames": [ + "containers-mounting-docker-socket" + ], + "long_description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies pods that attempt to mount the Docker socket for accessing the Docker runtime.", + "test": "Check hostPath volumes. If the path is set to one of the container runtime sockets, the container has access to the container runtime - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0138", + "name": "Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
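For C-0018 (Configured readiness probe), a minimal sketch of a container that passes the check might look like the following; the pod name, image and health endpoint are only illustrative.

```
apiVersion: v1
kind: Pod
metadata:
  name: readiness-demo            # illustrative name
spec:
  containers:
  - name: web
    image: k8s.gcr.io/test-webserver
    readinessProbe:               # without this block the control flags the pod
      httpGet:
        path: /healthz            # assumed health endpoint
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10
```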
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0164", + "name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" + ], + "rulesNames": [ + "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, proxy file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0149", + "name": "Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation on controller-manager.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. 
This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0177", + "name": "Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service.
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" + ], + "attributes": {}, + "rulesNames": [ + "kubelet-protect-kernel-defaults" + ], + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "By default, `--protect-kernel-defaults` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0150", + "name": "Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1" + ], + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Mount service principal", + "attributes": { + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "rulesNames": [ + "alert-mount-potential-credentials-paths" + ], + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
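As a hedged illustration of the config-file variant described for C-0177, a kubelet configuration file could set the option like this (a minimal sketch; other kubelet fields your nodes require are omitted).

```
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Equivalent to passing --protect-kernel-defaults=true on the command line
protectKernelDefaults: true
```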
For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "rulesNames": [ + "rule-access-dashboard-subject-v1", + "rule-access-dashboard-wl-v1" + ], + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
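To make the failure condition of C-0020 (Mount service principal) concrete, a pod that would be flagged might mount the AKS credentials path named in the control's test; the pod, container and volume names below are illustrative.

```
apiVersion: v1
kind: Pod
metadata:
  name: azure-creds-demo          # illustrative name
spec:
  containers:
  - name: app
    image: k8s.gcr.io/test-webserver
    volumeMounts:
    - mountPath: /creds
      name: azure-json
  volumes:
  - name: azure-json
    hostPath:
      path: /etc/kubernetes/azure.json   # known cloud credentials file on AKS nodes - triggers failure
```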
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow an attacker to gain additional access to the Kubernetes cluster or to external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" + ], + "attributes": {}, + "rulesNames": [ + "rule-can-list-get-secrets-v1" + ], + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets from system components which require it for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects:\n\n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group\nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "PersistentVolume without encryption", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control detects PersistentVolumes without encryption", + "remediation": "Enable encryption on the PersistentVolume using the configuration in StorageClass", + "rulesNames": [ + "pv-without-encryption" + ], + "test": "Checking all PersistentVolumes via their StorageClass for encryption", + "controlID": "C-0264", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more.
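As a sketch of the remediation for C-0186 (Minimize access to secrets), a namespaced Role can grant only the verbs a workload actually needs on a specific secret instead of get/list/watch on all of them; the role, namespace and secret names are illustrative.

```
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: app-secret-reader                  # illustrative name
  namespace: my-app                         # illustrative namespace
rules:
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["app-db-credentials"]     # restrict to the one secret needed
  verbs: ["get"]                            # no list or watch across all secrets
```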
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" + ], + "attributes": {}, + "rulesNames": [ + "rule-list-all-cluster-admins-v1" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "rulesNames": [ + "non-root-containers" + ], + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0202", + "name": "Minimize the admission of Windows HostProcess Containers", + "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", + "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. 
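A minimal sketch of the remediation described for C-0013 (Non-root containers), with illustrative names and user/group IDs:

```
apiVersion: v1
kind: Pod
metadata:
  name: nonroot-demo              # illustrative name
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000               # non-zero ID of 1000 or higher satisfies the control
    runAsGroup: 3000
  containers:
  - name: app
    image: k8s.gcr.io/test-webserver
```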
As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" + ], + "attributes": {}, + "rulesNames": [ + "pod-security-admission-baseline-applied-1", + "pod-security-admission-baseline-applied-2" + ], + "baseScore": 7, + "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using the subPath or subPathExpr feature.", + "rulesNames": [ + "Symlink-Exchange-Can-Allow-Host-Filesystem-Access" + ], + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs, or use the exceptions to prevent further notifications.", + "rulesNames": [ + "label-usage-for-resources" + ], + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment.
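For C-0202, one way to apply the kind of admission policy that the referenced pod-security-admission-baseline rules look for is to label a namespace for the built-in Pod Security Admission controller; this is a sketch, and the namespace name is illustrative.

```
apiVersion: v1
kind: Namespace
metadata:
  name: windows-workloads               # illustrative name
  labels:
    # The baseline Pod Security Standard rejects hostProcess containers
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/enforce-version: latest
```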
For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "The test checks whether a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Resources CPU limit and request", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU limit is not set.", + "remediation": "Set the CPU limit or use the exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-cpu-limit-and-request" + ], + "controlID": "C-0050", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0142", + "name": "Ensure that encryption providers are appropriately configured", + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", + "remediation": "Follow the Kubernetes documentation and configure an `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for the `--encryption-provider-config` argument.
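A small sketch of the labels that C-0076 (Label usage for resources) looks for, using the example set from the control description; all names and values are illustrative.

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp                     # illustrative name
  labels:
    app: myapp
    tier: frontend
    phase: test
spec:
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp                # pod template labels are checked as well
    spec:
      containers:
      - name: web
        image: k8s.gcr.io/test-webserver
```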
Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-encryption-providers-are-appropriately-configured" + ], + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no encryption provider is set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0237", + "name": "Check if signature exists", + "description": "Ensures that all images contain some signature", + "long_description": "Verifies that each image is signed", + "remediation": "Replace the image with a signed image", + "manual_test": "", + "references": [], + "attributes": {}, + "rulesNames": [ + "has-image-signature" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0169", + "name": "Ensure that the client certificate authorities file ownership is set to root:root", + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" + ], + "rulesNames": [ + "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0156", + "name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
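For C-0142, the EncryptionConfig file referenced in the remediation could look roughly like this; a sketch with a placeholder key (generate your own 32-byte base64-encoded key).

```
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
  - secrets
  providers:
  - aescbc:
      keys:
      - name: key1
        secret: <BASE64-ENCODED-32-BYTE-KEY>   # placeholder, do not commit a real key
  - identity: {}                               # fallback for reading data written before encryption was enabled
```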
These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" + ], + "attributes": {}, + "rulesNames": [ + "etcd-peer-tls-enabled" + ], + "baseScore": 7, + "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0223", + "name": "Minimize cluster access to read-only for Amazon ECR", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS.
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" + ], + "attributes": {}, + "rulesNames": [ + "ensure_nodeinstancerole_has_right_permissions_for_ecr" + ], + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0231", + "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" + ], + "attributes": {}, + "rulesNames": [ + "ensure-https-loadbalancers-encrypted-with-tls-aws" + ], + "baseScore": 5.0, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0132", + "name": "Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "description": "Retain 10 or an appropriate number of old log files.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. 
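As a hedged illustration of C-0231 on EKS, a LoadBalancer Service can terminate TLS with an ACM certificate via the standard AWS load balancer annotations; the Service name, selector and certificate ARN below are placeholders.

```
apiVersion: v1
kind: Service
metadata:
  name: web                       # illustrative name
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:REGION:ACCOUNT_ID:certificate/CERT_ID   # placeholder ARN
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443"
spec:
  type: LoadBalancer
  ports:
  - port: 443
    targetPort: 8080
  selector:
    app: web                      # illustrative selector
```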
For example, if you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0158", + "name": "Ensure that the --peer-auto-tls argument is not set to true", + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" + ], + "attributes": {}, + "rulesNames": [ + "etcd-peer-auto-tls-disabled" + ], + "baseScore": 6, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, the `--peer-auto-tls` argument is set to `false`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Deprecated Kubernetes image registry", + "attributes": {}, + "description": "The Kubernetes team has deprecated the GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io).
This is mandatory from Kubernetes 1.27.", + "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", + "rulesNames": [ + "rule-identify-old-k8s-registry" + ], + "long_description": "The Kubernetes team has deprecated the GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from Kubernetes 1.27.", + "test": "Checking images in the kube-system namespace; if the registry of the image is the old registry we raise an alert.", + "controlID": "C-0253", + "baseScore": 5.0, + "example": "@controls/examples/c239.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Access container service account", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with the KubeAPI server. All pods with an SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have an SA token mounted into them.", + "rulesNames": [ + "access-container-service-account-v1" + ], + "long_description": "A service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured liveness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "A liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the pods where the liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "rulesNames": [ + "configured-liveness-probe" + ], + "long_description": "A liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container.
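The remediation for C-0253 amounts to swapping the registry prefix in the image reference; a sketch with an illustrative component image and tag.

```
# Before: image pulled from the deprecated registry
#   image: k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.9.2
# After: same image pulled from the new community registry
apiVersion: v1
kind: Pod
metadata:
  name: registry-demo             # illustrative name
  namespace: kube-system
spec:
  containers:
  - name: kube-state-metrics
    image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.9.2   # illustrative tag
```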
This control finds all the pods where the Liveness probe is not configured.", + "controlID": "C-0056", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0097", + "name": "Ensure that the scheduler pod specification file ownership is set to root:root", + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" + ], + "rulesNames": [ + "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0230", + "name": "Ensure Network Policy is Enabled and set as appropriate", + "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", + "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", + "remediation": "", + "manual_test": "", + "references": [], + "attributes": {}, + "rulesNames": [ + "ensure-network-policy-is-enabled-eks" + ], + "baseScore": 6.0, + "impact_statement": "Network Policy requires the Network Policy add-on. 
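For C-0056 (Configured liveness probe, defined above), a minimal sketch of a container that passes the check; the pod name, image and probe endpoint are illustrative.

```
apiVersion: v1
kind: Pod
metadata:
  name: liveness-demo             # illustrative name
spec:
  containers:
  - name: web
    image: k8s.gcr.io/test-webserver
    livenessProbe:                # without this block the control flags the pod
      httpGet:
        path: /healthz            # assumed health endpoint
        port: 80
      initialDelaySeconds: 15
      periodSeconds: 20
```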
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0092", + "name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" + ], + "rulesNames": [ + "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Network mapping", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "rulesNames": [ + "internal-networking" + ], + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. 
Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "No impersonation", + "attributes": { + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "rulesNames": [ + "rule-can-impersonate-users-groups-v1" + ], + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0126", + "name": "Ensure that the admission control plugin NamespaceLifecycle is set", + "description": "Reject creating objects in a namespace that is undergoing termination.", + "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
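A common starting point for the remediation of C-0049 (Network mapping) is a default-deny NetworkPolicy per namespace, after which only the required flows are explicitly allowed; this is a sketch, and the policy name and namespace are illustrative.

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress      # illustrative name
  namespace: my-app               # illustrative namespace
spec:
  podSelector: {}                 # selects every pod in the namespace
  policyTypes:
  - Ingress                       # no ingress rules defined, so all inbound traffic is denied
```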
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set" + ], + "baseScore": 3, + "impact_statement": "None", + "default_value": "By default, `NamespaceLifecycle` is set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "rulesNames": [ + "rule-can-delete-k8s-events-v1" + ], + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events --all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-memory-limits" + ], + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0139", + "name": "Ensure that the API Server --client-ca-file argument is set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" + ], + "attributes": {}, + "rulesNames": [ + "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + } +] \ No newline at end of file diff --git a/releaseDev/default_config_inputs.json b/releaseDev/default_config_inputs.json new file mode 100644 index 000000000..6d17fc2da --- /dev/null +++ b/releaseDev/default_config_inputs.json @@ -0,0 +1,145 @@ +{ + "name": "default", + "attributes": {}, + "scope": { + "designatorType": "attributes", + "attributes": {} + }, + "settings": { + "postureControlInputs": { + "imageRepositoryAllowList": [], + "trustedCosignPublicKeys": [], + "insecureCapabilities": [ + "SETPCAP", + "NET_ADMIN", + "NET_RAW", + "SYS_MODULE", + "SYS_RAWIO", + "SYS_PTRACE", + "SYS_ADMIN", + "SYS_BOOT", + "MAC_OVERRIDE", + "MAC_ADMIN", + "PERFMON", + "ALL", + "BPF" + ], + "listOfDangerousArtifacts": [ + "bin/bash", + "sbin/sh", + "bin/ksh", + "bin/tcsh", + "bin/zsh", + "usr/bin/scsh", + "bin/csh", + "bin/busybox", + "usr/bin/busybox" + ], + "publicRegistries": [], + "sensitiveInterfaces": [ + "nifi", + "argo-server", + "weave-scope-app", + "kubeflow", + "kubernetes-dashboard", + "jenkins", + "prometheus-deployment" + ], + "max_critical_vulnerabilities": [ + "5" + ], + "max_high_vulnerabilities": [ + "10" + ], + "sensitiveKeyNames": [ + "aws_access_key_id", + "aws_secret_access_key", + "azure_batchai_storage_account", + "azure_batchai_storage_key", + "azure_batch_account", + "azure_batch_key", + "secret", + "key", + "password", + "pwd", + "token", + "jwt", + "bearer", + "credential" + ], + "sensitiveValues": [ + "BEGIN \\w+ PRIVATE KEY", + "PRIVATE KEY", + "eyJhbGciO", + "JWT", + "Bearer", + "_key_", + "_secret_" + ], + "sensitiveKeyNamesAllowed": [], + "sensitiveValuesAllowed": [], + "servicesNames": [ + "nifi-service", + "argo-server", + "minio", + "postgres", + "workflow-controller-metrics", + "weave-scope-app", + "kubernetes-dashboard" + ], + "untrustedRegistries": [], + "memory_request_max": [], + "memory_request_min": [ + "0" + ], + "memory_limit_max": [], + "memory_limit_min": [ + "0" + ], + "cpu_request_max": [], + "cpu_request_min": [ + "0" + ], + "cpu_limit_max": [], + "cpu_limit_min": [ + "0" + ], + "wlKnownNames": [ + "coredns", + "kube-proxy", + "event-exporter-gke", + "kube-dns", + "17-default-backend", + "metrics-server", + "ca-audit", + "ca-dashboard-aggregator", + "ca-notification-server", + "ca-ocimage", + "ca-oracle", + "ca-posture", + "ca-rbac", + "ca-vuln-scan", + "ca-webhook", + "ca-websocket", + "clair-clair" + ], + "recommendedLabels": [ + "app", + "tier", + "phase", + "version", + "owner", + "env" + ], + "k8sRecommendedLabels": [ + "app.kubernetes.io/name", + "app.kubernetes.io/instance", + 
"app.kubernetes.io/version", + "app.kubernetes.io/component", + "app.kubernetes.io/part-of", + "app.kubernetes.io/managed-by", + "app.kubernetes.io/created-by" + ] + } + } +} \ No newline at end of file diff --git a/releaseDev/devopsbest.json b/releaseDev/devopsbest.json new file mode 100644 index 000000000..e0ce34597 --- /dev/null +++ b/releaseDev/devopsbest.json @@ -0,0 +1,1107 @@ +{ + "name": "DevOpsBest", + "description": "", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Configured readiness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "configured-readiness-probe", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Readiness probe is not configured", + "remediation": "Ensure Readiness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": 
[fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Configured liveness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution 
lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "controlID": "C-0056", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "configured-liveness-probe", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Liveness probe is not configured", + "remediation": "Ensure Liveness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the pods running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "name": "Naked pods", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. 
This control identifies every pod that does not have a corresponding parental object.", + "remediation": "Create necessary Deployment object for every pod making any pod a first class citizen in your IaC architecture.", + "long_description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", + "test": "Test if pods are not associated with Deployment, ReplicaSet etc. If not, fail.", + "controlID": "C-0073", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "naked-pods", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", + "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. Example command: kubectl create deployment nginx-depl --image=nginx:1.19", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. 
If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "containers-mounting-docker-socket", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n" + } + ] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is 
not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "image-pull-policy-is-not-set-to-always", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "check imagePullPolicy field, if imagePullPolicy = always pass, else fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), 
sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" + } + ] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. 
This control helps you find deployments without any of the expected labels.", + "test": "Test will check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "label-usage-for-resources", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.recommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.recommendedLabels", + "name": "Recommended Labels", + "description": "Kubescape checks that workloads have at least one label that identifies semantic attributes." + } + ], + "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, 
beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" + } + ] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "k8s-common-labels-usage", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.k8sRecommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.k8sRecommendedLabels", + "name": "Kubernetes Recommended Labels", + "description": "Kubescape checks that workloads have at least one of this list of configurable labels, as recommended in the Kubernetes documentation." + } + ], + "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his 
Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" + } + ] + }, + { + "name": "Deprecated Kubernetes image registry", + "attributes": {}, + "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", + "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). 
This is mandatory from 1.27", + "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", + "controlID": "C-0253", + "baseScore": 5.0, + "example": "@controls/examples/c239.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-identify-old-k8s-registry", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Identifying if pod container images are from deprecated K8s registry", + "remediation": "Use images new registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": 
[obj]}}\n}\n\n" + } + ] + }, + { + "name": "Ensure CPU requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU requests are not set.", + "remediation": "Set the CPU requests or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0268", + "baseScore": 3.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-cpu-requests", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU requests are not set.", + "remediation": "Ensure CPU requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ==================================== no CPU requests =============================================\n# Fails if pod does not have container with CPU request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU requests\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU requests\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Ensure memory requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the memory requests are not set.", + "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0269", + "baseScore": 3.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-requests", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory requests are not set.", + "remediation": "Ensure memory requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory requests ==================================\n# Fails if pod does not have container with memory requests\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n" + } + ] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-cpu-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does 
not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0018", + "C-0044", + "C-0056", + "C-0061", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0253", + "C-0268", + "C-0269", + "C-0270", + "C-0271" + ] +} \ No newline at end of file diff --git a/releaseDev/exceptions.json b/releaseDev/exceptions.json new file mode 100644 index 000000000..8f9b40652 --- /dev/null +++ b/releaseDev/exceptions.json @@ -0,0 +1,7820 @@ +[ + { + "name": "exclude-default-namespace-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "name": "kubescape", + "namespace": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-default-namespace-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-default-namespace-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "default", + "namespace": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-pod-kube-apiserver", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-apiserver-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0013" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0020" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0034" + }, + { + "controlID": "c-0016" + }, + { + "controlID": "C-0270" + }, + { + "controlID": "C-0271" + }, + { + "controlID": "c-0048" + }, + { + "controlID": "c-0041" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" 
+ }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "synchronizer", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + 
], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "storage", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0076" + }, + { + "controlID": "c-0237" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + }, + { + "controlID": "c-0038" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + 
"namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0013" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0013" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0013" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0013" + } + ] + }, + { + 
"name": "exclude-kubescape-deployment-ingress-and-egress-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0013" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0045" + }, + { + "controlID": "c-0046" + }, + { + "controlID": "c-0048" + }, + { + "controlID": "c-0057" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0016" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0034" + }, + { + "controlID": "c-0074" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + } + ] + }, + { + "name": "exclude-ks-service-account", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "ks-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-kubescape-service-account", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubescape-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-kubescape-default-service-account", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "default", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0189" + }, + { + "controlID": "c-0190" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "ks-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0015" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0186" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": 
"c-0034" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0015" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0186" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubescape-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0015" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0186" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0015" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0186" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0015" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0186" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0015" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0186" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "synchronizer", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0015" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0186" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "node-agent-service-account", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0207" + }, + { + "controlID": "c-0013" + }, + { + "controlID": 
"c-0015" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0186" + } + ] + }, + { + "name": "exclude-kubescape-otel", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + } + ] + }, + { + "name": "exclude-kubescape-host-scanner-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "host-scanner", + "namespace": "kubescape-host-scanner" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kubescape-host-scanner-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "host-scanner", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-schedulers-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubevuln-schedule-.*", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0026" + }, + { + "controlID": "c-0076" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-schedulers-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubescape-registry-scan-.*", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0026" + }, + { + "controlID": "c-0076" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-schedulers-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubevuln-scheduler", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0026" + }, + { + "controlID": "c-0076" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0210" + }, + { + "controlID": 
"c-0211" + } + ] + }, + { + "name": "exclude-schedulers-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubescape-scheduler", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0026" + }, + { + "controlID": "c-0076" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-storage-apiserver", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "storage-apiserver", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0034" + }, + { + "controlID": "c-0260" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0076" + } + ] + }, + { + "name": "exclude-ns", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kubescape" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kubescape-prometheus-security-context", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape-prometheus" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-kubescape-prometheus-deployment-allowed-registry", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape-prometheus" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-prometheus-deployment-ingress-and-egress", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape-prometheus" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "coredns-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": 
"exclude-gke-kube-system-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-proxy-[A-Za-z0-9-]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "etcd-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "metadata-proxy-v[0-9.]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "node-local-dns" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "gke-metrics-agent.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "pdcsi-node-windows" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "anetd" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "netd" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": 
"fluentbit-gke-big" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke-small" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke-max" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nccl-fastsocket-installer" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-15", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "filestore-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "pdcsi-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-17", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "ip-masq-agent" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-18", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "anetd-win" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-19", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": 
[ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "gke-metadata-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-20", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "gke-metrics-agent-windows" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-21", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "kube-proxy" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-22", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-23", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin-large" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-24", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin-medium" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-25", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "image-package-extractor" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-26", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "namespace": "kube-system", + "name": "image-package-extractor-cleanup" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-27", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin-small" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } 
+ ] + }, + { + "name": "exclude-gke-kube-system-resources-28", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-29", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-30", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "egress-nat-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-31", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "event-exporter-gke" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-32", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "antrea-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-33", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "antrea-controller-horizontal-autoscaler" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-34", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "kube-dns-autoscaler" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-35", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "metrics-server-v[0-9.]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-36", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + 
"attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "konnectivity-agent-autoscaler" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-37", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentd-elasticsearch" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-38", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "konnectivity-agent" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-39", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "l7-default-backend" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-public-resources", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-public", + "name": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-gke-kube-node-lease-resources", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-node-lease", + "name": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "konnectivity-agent-cpha" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "endpointslicemirroring-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": 
true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "replicaset-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "endpointslice-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "service-account-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "namespace-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "clusterrole-aggregation-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "generic-garbage-collector" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "certificate-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "daemon-set-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cloud-provider" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + 
"attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ephemeral-volume-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "root-ca-cert-publisher" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "bootstrap-signer" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-18", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "expand-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-19", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "disruption-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-20", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ttl-after-finished-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-21", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "job-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-22", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pv-protection-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-23", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "persistent-volume-binder" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-24", + "policyType": "postureExceptionPolicy", + "actions": 
[ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pvc-protection-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-25", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "statefulset-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-26", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "deployment-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-27", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "node-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-28", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cronjob-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-29", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "resourcequota-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-30", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "endpoint-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-31", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pod-garbage-collector" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-32", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ttl-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-33", + "policyType": "postureExceptionPolicy", + 
"actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "token-cleaner" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-34", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-35", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "attachdetach-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-36", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "kube-proxy" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-37", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "konnectivity-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-38", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "replication-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-39", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-40", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "service-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-41", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "kube-dns-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-42", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], 
+ "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "netd" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-43", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "metadata-proxy" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-44", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "antrea-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-45", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cilium" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-46", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "node-local-dns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-47", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "gke-metrics-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-48", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "egress-nat-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-49", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "antrea-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-50", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "event-exporter-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-51", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, 
+ "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "antrea-cpha" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-52", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "fluentbit-gke" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-53", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pdcsi-node-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-54", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ip-masq-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-55", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "filestorecsi-node-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-56", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "gke-metadata-server" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-users-and-groups-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "namespace": "kube-system", + "name": "system:vpa-recommender" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-users-and-groups-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "namespace": "kube-system", + "name": "system:anet-operator" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:clustermetrics" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + 
"name": "system:controller:glbc" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:l7-lb-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:managed-certificate-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:gke-common-webhooks" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kube-scheduler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:gcp-controller-manager" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:resource-tracker" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:storageversionmigrator" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kube-controller-manager" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kubestore-collector" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + 
"kind": "Group", + "name": "system:masters" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "ca-validate-cfg" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "flowcontrol-guardrails.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "validation-webhook.snapshot.storage.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "gmp-operator.gmp-system.monitoring.googleapis.com" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "warden-validating.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "nodelimit.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "gkepolicy.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "validation-webhook.snapshot.storage.k8s.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "APIService", + "name": 
"v1beta1.metrics.k8s.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "pod-ready.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "ca-mutate-cfg" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "neg-annotation.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "mutate-scheduler-profile.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "sasecret-redacter.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-15", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "workload-defaulter.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "admissionwebhookcontroller.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-17", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "gke-vpa-webhook-config" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-18", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": 
"filestorecsi-mutation-webhook.storage.k8s.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-19", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-20", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "gmp-public" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-21", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "gmp-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kube-controller-manager", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kube-scheduler", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "route-controller", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "superadmin", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "pkgextract-service", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "default", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + 
"resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "collector", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "operator", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "collector", + "namespace": "gmp-public" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "alertmanager", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "collector", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "rule-evaluator", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gmp-operator", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-15", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "name": "gke-metrics-agent-conf", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-eks-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "aws-node-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-proxy-[A-Za-z0-9]+" + } + } + ], + 
"posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "metrics-server-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "aws-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "eventrouter" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "ebs-csi-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "ebs-csi-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "ebs-csi-node-windows" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": 
"coredns-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "metrics-server-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-17", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "aws-cloud-provider" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-18", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "aws-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-19", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "eks-admin" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-20", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "eks-vpc-resource-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-21", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-22", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "tagging-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-23", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": 
"ServiceAccount", + "namespace": "kube-system", + "name": "vpc-resource-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-24", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "eventrouter" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-25", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ebs-csi-controller-sa" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-26", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ebs-csi-node-sa" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-27", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "eks:fargate-manager" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-28", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "eks:addon-manager" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-29", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "eks:certificate-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-30", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "eks:node-manager" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-31", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Group", + "name": "system:masters" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-otel", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": 
"c-0034" + } + ] + }, + { + "name": "exclude-service-accounts-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0186" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-service-accounts-17", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0186" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-service-accounts-18", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0186" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-service-accounts-19", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage-aggregated-apiserver-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0186" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-service-accounts-20", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0190" + } + ] + }, + { + "name": "exclude-service-accounts-21", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0190" + } + ] + }, + { + "name": "exclude-aks-kube-system-deployments-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + 
"namespace": "kube-system", + "name": "coredns-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "konnectivity-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azuredisk-node-win" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "azure-ip-masq-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "cloud-node-manager" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "cloud-node-manager-windows" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "omsagent-rs" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-pods-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "azure-ip-masq-agent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": 
"cloud-node-manager-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "coredns-autoscaler--[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "csi-azuredisk-node-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "csi-azurefile-node-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "konnectivity-agent-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "omsagent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "omsagent-rs-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-services-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-services-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + 
{ + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azuredisk-node" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azurefile-node" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azurefile-node-win" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "kube-proxy" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "omsagent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "omsagent-win" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "coredns-autoscaler-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "coredns-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "konnectivity-agent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": 
"ReplicaSet", + "namespace": "kube-system", + "name": "metrics-server-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "omsagent-rs-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-namespaces-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-public" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-namespaces-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-node-lease" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "azure-cloud-provider" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cloud-node-manager" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "coredns-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "csi-azuredisk-node-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "csi-azurefile-node-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": 
"exclude-aks-kube-system-sa-23", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "horizontal-pod-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-30", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "omsagent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-46", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "default", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-47", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-node-lease", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-48", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-public", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-49", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "azure-ip-masq-agent-config-reconciled" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-50", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "cluster-autoscaler-status" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-51", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "container-azm-ms-aks-k8scluster" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-52", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-53", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + 
"attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "coredns-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-54", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "coredns-custom" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-55", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "extension-apiserver-authentication" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-56", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-57", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "omsagent-rs-config" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-58", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "overlay-upgrade-data" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-59", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "aks-webhook-admission-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-60", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "aks-node-mutating-webhook" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-61", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "aks-node-validating-webhook" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-63", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": 
"Group", + "name": "system:nodes" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-64", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "clusterAdmin" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-minikube-kube-system-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-proxy-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "sealed-secrets-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "tpu-device-plugin" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "runsc-metric-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-system" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "storage-provisioner" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + 
"namespace": "kube-system", + "name": "kube-scheduler-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-controller-manager-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-kube-system-service-accounts-84", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "storage-provisioner" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:kube-scheduler", + "kind": "User" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-15", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:kube-controller-manager", + "kind": "User" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:masters", + "kind": "Group" + } + } + ], + "posturePolicies": [ + {} + ] + } +] \ No newline at end of file diff --git a/releaseDev/frameworks.json b/releaseDev/frameworks.json new file mode 100644 index 000000000..1a5b22d6f --- /dev/null +++ b/releaseDev/frameworks.json @@ -0,0 +1,11764 @@ +[ + { + "name": "DevOpsBest", + "description": "", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Configured readiness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
This control finds all the pods where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try to reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured liveness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "controlID": "C-0056", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running pods in the cluster without explicit namespace assignment. 
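For reference, a minimal pod manifest that would satisfy both the readiness probe (C-0018) and liveness probe (C-0056) checks described above could look like the sketch below; the pod name, image, and /healthz endpoint are illustrative assumptions, not values taken from this repository.

apiVersion: v1
kind: Pod
metadata:
  name: probe-demo               # hypothetical name
spec:
  containers:
  - name: web
    image: nginx:1.25            # illustrative image
    ports:
    - containerPort: 80
    readinessProbe:              # C-0018: traffic is only sent once this succeeds
      httpGet:
        path: /healthz           # assumed health endpoint
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10
    livenessProbe:               # C-0056: container is restarted if this keeps failing
      httpGet:
        path: /healthz
        port: 80
      initialDelaySeconds: 15
      periodSeconds: 20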
This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "long_description": "It is recommended to avoid running pods in the cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the pods running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Naked pods", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", + "remediation": "Create the necessary Deployment object for every pod, making any pod a first class citizen in your IaC architecture.", + "long_description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", + "test": "Test if pods are not associated with Deployment, ReplicaSet etc. If not, fail.", + "controlID": "C-0073", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. If the path is set to one of the container runtime sockets, the container has access to container runtime - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. 
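As a sketch of the remediation for C-0061 (pods in the default namespace) and C-0073 (naked pods), the workload below is declared in an explicit namespace and owned by a Deployment rather than created as a bare pod; the namespace, names, and image are assumptions for illustration only.

apiVersion: v1
kind: Namespace
metadata:
  name: web-apps                 # hypothetical namespace
---
apiVersion: apps/v1
kind: Deployment                 # parent object, so the pod is not "naked"
metadata:
  name: web
  namespace: web-apps            # explicit, non-default namespace
spec:
  replicas: 2
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: web
        image: nginx:1.25        # illustrative image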
Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "Test will check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
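The snippet below sketches how a workload could pass both C-0075 (image pull policy on the latest tag) and C-0076 (label usage); the label values and image name are hypothetical, and the label keys follow the configurable initial list (app, tier, phase, version, owner, env) mentioned in the control.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
  labels:                        # C-0076: semantic labels on the workload
    app: myapp
    tier: frontend
    phase: test
    version: v3
    owner: team-web              # assumed value
    env: staging                 # assumed value
spec:
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
        tier: frontend
    spec:
      containers:
      - name: myapp
        image: myapp:latest      # latest tag, so the pull policy matters
        imagePullPolicy: Always  # C-0075: always re-check the registry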
This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Deprecated Kubernetes image registry", + "attributes": {}, + "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", + "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", + "controlID": "C-0253", + "baseScore": 5.0, + "example": "@controls/examples/c239.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure CPU requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU requests are not set.", + "remediation": "Set the CPU requests or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0268", + "baseScore": 3.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the memory requests are not set.", + "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0269", + "baseScore": 3.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + 
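For C-0077, the Kubernetes recommended common labels documented at the URL above can be attached to object metadata, and the image can be pulled from registry.k8s.io rather than the deprecated k8s.gcr.io registry flagged by C-0253; all names and values below are illustrative assumptions.

apiVersion: v1
kind: Pod
metadata:
  name: common-labels-demo                 # hypothetical name
  labels:
    app.kubernetes.io/name: myapp          # assumed values
    app.kubernetes.io/instance: myapp-prod
    app.kubernetes.io/version: "1.2.3"
    app.kubernetes.io/part-of: web-shop
    app.kubernetes.io/managed-by: helm
spec:
  containers:
  - name: pause
    image: registry.k8s.io/pause:3.9       # new registry, per C-0253 remediation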
"file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0018", + "C-0044", + "C-0056", + "C-0061", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0253", + "C-0268", + "C-0269", + "C-0270", + "C-0271" + ] + }, + { + "name": "AllControls", + "description": "Contains all the controls from all the frameworks", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Roles with delete capabilities", + "attributes": { + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
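For C-0012 (application credentials in configuration files), one common remediation is to reference a Kubernetes Secret from the pod instead of embedding the value in the manifest; the Secret and key names below are assumptions.

apiVersion: v1
kind: Pod
metadata:
  name: secret-env-demo          # hypothetical name
spec:
  containers:
  - name: app
    image: myapp:1.0             # illustrative image
    env:
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:            # value comes from a Secret, not a literal in the manifest
          name: db-credentials   # assumed Secret name
          key: password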
This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
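A pod-level securityContext along the lines of the C-0013 remediation (non-root execution with explicit user and group IDs of 1000 or higher) could be sketched as follows; the IDs and image are illustrative.

apiVersion: v1
kind: Pod
metadata:
  name: nonroot-demo             # hypothetical name
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000              # any ID >= 1000 per the remediation text
    runAsGroup: 1000
  containers:
  - name: app
    image: myapp:1.0             # illustrative image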
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). 
If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured readiness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Mount service principal", + "attributes": { + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
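Combining C-0016 (allow privilege escalation) and C-0017 (immutable container filesystem), a container securityContext might be hardened as in the sketch below, with an emptyDir mounted for the one directory the application still needs to write to; paths and names are assumptions.

apiVersion: v1
kind: Pod
metadata:
  name: hardened-demo            # hypothetical name
spec:
  containers:
  - name: app
    image: myapp:1.0             # illustrative image
    securityContext:
      allowPrivilegeEscalation: false   # C-0016
      readOnlyRootFilesystem: true      # C-0017
    volumeMounts:
    - name: tmp
      mountPath: /tmp            # writable scratch space only
  volumes:
  - name: tmp
    emptyDir: {}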
For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. 
It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. 
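A default deny-all NetworkPolicy of the kind C-0030 describes (restricting both ingress and egress for every pod in a namespace, to be relaxed afterwards with more specific policies) could look like this; the namespace name is an assumption.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: web-apps            # hypothetical namespace
spec:
  podSelector: {}                # selects every pod in the namespace
  policyTypes:
  - Ingress
  - Egress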
Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
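For C-0034, automatic mounting of the service account token can be disabled at the ServiceAccount level (and re-enabled on individual pods that genuinely need it, since the pod-level setting takes precedence); the account name and namespace are illustrative.

apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                   # hypothetical name
  namespace: web-apps            # hypothetical namespace
automountServiceAccountToken: false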
This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "SSH server running inside container", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", + "controlID": "C-0042", + "baseScore": 3.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. 
It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. 
This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Network mapping", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Instance Metadata API", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Access container service account", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", + "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured liveness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "controlID": "C-0056", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp, etc., and still remove all unnecessary capabilities. 
Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. 
This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the pods running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have the sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have the sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers with relevant RBAC permission can use the \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit the \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside the target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods \u2013 if they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "No impersonation", + "attributes": { + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in production environments for daily operations. 
This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events that happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "PSP enables fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and extend authorization beyond RBAC. It is important to use PSP to control the creation of sensitive pods in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification: the Kubelet must be configured with a client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. 
This is done through client certificate verification: the Kubelet must be configured with a client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Naked pods", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", + "remediation": "Create the necessary Deployment object for every pod, making any pod a first-class citizen in your IaC architecture.", + "long_description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", + "test": "Test if pods are not associated with Deployment, ReplicaSet etc. If not, fail.", + "controlID": "C-0073", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. If the path is set to one of the container runtime sockets, the container has access to the container runtime - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. 
This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "Test will check if a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if labels that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows the user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point Kubernetes to a compromised container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. Users should list all the approved repositories in the parameters of this control so that any potentially dangerous image can be identified.", + "test": "Checks if the image is from an allowed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. 
Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "RBAC enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-39328-grafana-auth-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable an attacker to access unauthorized endpoints under heavy load.", + "remediation": "Update your Grafana to 9.2.4 or above", + "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. The CVSS score for this vulnerability is 9.8 Critical.", + "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", + "controlID": "C-0090", + "baseScore": 9.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Kyverno to 1.8.5 or above", + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. 
See C-0001 and C-0078 for limiting the use of trusted repositories.", + "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to the anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0265", + "name": "Authenticated user has sensitive permissions", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have cluster-risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to the system:authenticated group.", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use the exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use the exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + 
"scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0005", + "C-0007", + "C-0012", + "C-0013", + "C-0014", + "C-0015", + "C-0016", + "C-0017", + "C-0018", + "C-0020", + "C-0021", + "C-0026", + "C-0030", + "C-0031", + "C-0034", + "C-0035", + "C-0036", + "C-0038", + "C-0039", + "C-0041", + "C-0042", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0049", + "C-0052", + "C-0053", + "C-0054", + "C-0055", + "C-0056", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0078", + "C-0079", + "C-0081", + "C-0087", + "C-0088", + "C-0090", + "C-0091", + "C-0262", + "C-0265", + "C-0270", + "C-0271" + ] + }, + { + "name": "cis-v1.23-t1.0.1", + "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", + "attributes": { + "armoBuiltin": true, + "version": "v1.0.1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "1": { + "id": "1", + "name": "Control Plane Components", + "subSections": { + "1": { + "id": "1.1", + "name": "Control Plane Node Configuration Files", + "controlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112" + ] + }, + "2": { + "id": "1.2", + "name": "API Server", + "controlsIDs": [ + "C-0113", + "C-0114", + "C-0115", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0124", + "C-0125", + "C-0126", + "C-0127", + "C-0128", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0143" + ] + }, + "3": { + "id": "1.3", + "name": "Controller Manager", + "controlsIDs": [ + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150" + ] + }, + "4": { + "id": "1.4", + "name": "Scheduler", + "controlsIDs": [ + "C-0151", + "C-0152" + ] + } + } + }, + "2": { + "name": "etcd", + "id": "2", + "controlsIDs": [ + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159" + ] + }, + "3": { + "name": "Control Plane Configuration", + "id": "3", + "subSections": { + "2": { + "name": "Logging", + "id": "3.2", + "controlsIDs": [ + "C-0160", + "C-0161" + ] + } + } + }, + "4": { + "name": "Worker Nodes", + "id": "4", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "4.1", + "controlsIDs": [ + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171" + ] + }, + "2": { + "name": "Kubelet", + "id": "4.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184" + ] + } + } + }, + "5": { + "name": "Policies", + "id": "5", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "5.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191" + ] + }, + "2": { + "name": "Pod Security Standards", + "id": "5.2", + "controlsIDs": [ + "C-0192", + "C-0193", + "C-0194", + "C-0195", + 
"C-0196", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204" + ] + }, + "3": { + "name": "Network Policies and CNI", + "id": "5.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "name": "Secrets Management", + "id": "5.4", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "7": { + "name": "General Policies", + "id": "5.7", + "controlsIDs": [ + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "controlID": "C-0092", + "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0093", + "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0094", + "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0095", + "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0096", + "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0097", + "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0098", + "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API objects. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0099", + "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API objects. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0100", + "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0101", + "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0102", + "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. 
It should not be readable or writable by any group members or the world.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory has permissions of `755`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0103", + "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0104", + "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, admin.conf has permissions of `600`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0105", + "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0106", + "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0107", + "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. 
You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0108", + "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0109", + "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0110", + "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0111", + "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0112", + "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0113", + "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the API server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. 
However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0114", + "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", + "description": "Do not use token based authentication.", + "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", + "default_value": "By default, `--token-auth-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0115", + "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", + "default_value": "By default, the `DenyServiceExternalIPs` admission controller is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0116", + "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "description": "Enable certificate based kubelet authentication.", + "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, certificate-based kubelet authentication is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0117", + "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "description": "Verify kubelet's certificate before establishing connection.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0118", + "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not always authorize all requests.", + "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Only authorized requests will be served.", + "default_value": "By default, `AlwaysAllow` is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0119", + "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, `Node` authorization is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0120", + "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", + "description": "Turn on Role Based Access Control.", + "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. 
It is recommended to use the RBAC authorization mode.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", + "default_value": "By default, `RBAC` authorization is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0121", + "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", + "description": "Limit the rate at which the API server accepts requests.", + "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "You need to carefully tune in limits as per your environment.", + "default_value": "By default, `EventRateLimit` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0122", + "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", + "description": "Do not allow all requests.", + "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and does not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. 
Its behavior was equivalent to turning off all admission controllers.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Only requests explicitly allowed by the admission control plugins would be served.", + "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0123", + "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", + "description": "Always pull images.", + "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" + ], + "attributes": {}, + "baseScore": 4, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", + "default_value": "By default, `AlwaysPullImages` is not set.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0124", + "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", + "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies.", + "default_value": "By default, `SecurityContextDeny` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0125", + "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", + "description": "Automate service accounts management.", + "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "None.", + "default_value": "By default, `ServiceAccount` is set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0126", + "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", + "description": "Reject creating objects in a namespace that is undergoing termination.", + "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "None", + "default_value": "By default, `NamespaceLifecycle` is set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0127", + "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `NodeRestriction` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0128", + "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", + "description": "Do not disable the secure port.", + "long_description": "The secure port is used to serve https with authentication and authorization. 
If you disable it, no https traffic is served and all traffic is served unencrypted.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "You need to set the API Server up with the right TLS certificates.", + "default_value": "By default, port 6443 is used as the secure port.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0129", + "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0130", + "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0131", + "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "description": "Retain the logs for at least 30 days or as appropriate.", + "long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0132", + "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "description": "Retain 10 or an appropriate number of old log files.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. 
For example, if you have set file size of 100 MB and the number of old log files to keep as 10, you would approximately have 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0133", + "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximately have 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0134", + "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", + "description": "Set global request timeout for API server requests as appropriate.", + "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to a Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--request-timeout` is set to 60 seconds.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0135", + "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", + "description": "Validate service account before validating token.", + "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. This is an example of time of check to time of use security issue.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `--service-account-lookup` argument is set to `true`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0136", + "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-key-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0137", + "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0138", + "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0139", + "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0140", + "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-cafile` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0141", + "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "description": "Encrypt etcd key-value store.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `--encryption-provider-config` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0142", + "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. 
Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no encryption provider is set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0143", + "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0144", + "name": "CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", + "description": "Activate garbage collector on pod termination, as appropriate.", + "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. 
Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0145", + "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0146", + "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "description": "Use individual service account credentials for each controller.", + "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. 
When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" + ], + "attributes": {}, + "baseScore": 4, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.", + "default_value": "By default, `--use-service-account-credentials` is set to false.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0147", + "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-private-key-file` it not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0148", + "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could be a subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "You need to setup and maintain root certificate authority file.", + "default_value": "By default, `--root-ca-file` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0149", + "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation on controller-manager.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. 
This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0150", + "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0151", + "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0152", + "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0153", + "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", + "description": "Configure TLS encryption for the etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Client connections only over TLS would be served.", + "default_value": "By default, TLS encryption is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0154", + "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", + "description": "Enable client authentication on etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", + "default_value": "By default, the etcd service can be queried by unauthenticated clients.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0155", + "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", + "description": "Do not use self-signed certificates for TLS.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. 
You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", + "default_value": "By default, `--auto-tls` is set to `false`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0156", + "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0157", + "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", + "description": "etcd should be configured for peer authentication.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. 
```--peer-client-cert-auth=true```", + "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0158", + "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0159", + "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", + "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", + "default_value": "By default, no etcd certificate is created and used.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0160", + "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", + "remediation": "Create an audit policy file for your cluster.", + "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating too large volumes of log information as this could impact the availability of the cluster nodes.", + "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0161", + "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", + "description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", + "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas :-\n\n * Access to Secrets managed by the cluster. 
Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", + "default_value": "By default Kubernetes clusters do not log audit information.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0162", + "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kubelet` service file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0163", + "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0164", + "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, proxy file has permissions of `640`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0165", + "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `proxy` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0166", + "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file has permissions of `600`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0167", + "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U %G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0168", + "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `644` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0169", + "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0170", + "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0171", + "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0172", + "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0173", + "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0174", + "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0175", + "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "By default, `--read-only-port` is set to `10255/TCP`. 
However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0176", + "name": "CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0177", + "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "By default, `--protect-kernel-defaults` is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0178", + "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0179", + "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. 
Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", + "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", + "default_value": "By default, `--hostname-override` argument is not set.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0180", + "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. 
The cluster's event processing and storage systems should be scaled to handle expected event loads.", +    "default_value": "By default, `--event-qps` argument is set to `5`.", +    "category": { +     "name": "Control plane", +     "id": "Cat-1" +    }, +    "scanningScope": { +     "matches": [ +      "cluster" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0181", +    "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", +    "description": "Set up TLS connection on the Kubelets.", +    "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", +    "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", +    "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" +    ], +    "attributes": {}, +    "baseScore": 7, +    "impact_statement": "", +    "default_value": "", +    "category": { +     "name": "Control plane", +     "id": "Cat-1" +    }, +    "scanningScope": { +     "matches": [ +      "cluster" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0182", +    "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", +    "description": "Enable kubelet client certificate rotation.", +    "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g.
Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7).", +    "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", +    "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" +    ], +    "attributes": {}, +    "baseScore": 6, +    "impact_statement": "None", +    "default_value": "By default, kubelet client certificate rotation is enabled.", +    "category": { +     "name": "Control plane", +     "id": "Cat-1" +    }, +    "scanningScope": { +     "matches": [ +      "cluster" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0183", +    "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", +    "description": "Enable kubelet server certificate rotation.", +    "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", +    "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service.
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet server certificate rotation is enabled.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0184", + "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", +    "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present, check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" +    ], +    "attributes": {}, +    "baseScore": 5, +    "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", +    "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", +    "category": { +     "name": "Control plane", +     "id": "Cat-1" +    }, +    "scanningScope": { +     "matches": [ +      "cluster" +     ] +    }, +    "rules": [] +   }, +   { +    "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", +    "controlID": "C-0185", +    "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", +    "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", +    "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", +    "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", +    "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" +    ], +    "attributes": {}, +    "baseScore": 8, +    "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster.
Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where 
access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. 
Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0192", + "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", + "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", + "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. 
To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", + "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.", + "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0193", + "name": "CIS-5.2.2 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of privileged containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0194", + "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0195", + "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which 
require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", +    "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", +    "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" +    ], +    "attributes": {}, +    "baseScore": 5, +    "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", +    "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", +    "category": { +     "name": "Workload", +     "id": "Cat-5" +    }, +    "scanningScope": { +     "matches": [ +      "cluster", +      "file" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0196", +    "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", +    "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", +    "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", +    "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", +    "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" +    ], +    "attributes": {}, +    "baseScore": 5, +    "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", +    "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", +    "category": { +     "name": "Workload", +     "id": "Cat-5" +    }, +    "scanningScope": { +     "matches": [ +      "cluster", +      "file" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0197", +    "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", +    "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running in a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", +    "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation.
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", +    "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", +    "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" +    ], +    "attributes": {}, +    "baseScore": 6, +    "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", +    "default_value": "By default, there are no restrictions on a contained process's ability to escalate privileges, within the context of the container.", +    "category": { +     "name": "Workload", +     "id": "Cat-5" +    }, +    "scanningScope": { +     "matches": [ +      "cluster", +      "file" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0198", +    "name": "CIS-5.2.7 Minimize the admission of root containers", +    "description": "Do not generally permit containers to be run as the root user.", +    "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", +    "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", +    "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" +    ], +    "attributes": {}, +    "baseScore": 6, +    "impact_statement": "Pods with containers which run as the root user will not be permitted.", +    "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", +    "category": { +     "name": "Workload", +     "id": "Cat-5" +    }, +    "scanningScope": { +     "matches": [ +      "cluster", +      "file" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0199", +    "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", +    "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", +    "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities.
With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0200", + "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, there are no restrictions on adding capabilities to containers.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0201", + "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, use of capabilities should be minimized.", +    "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", +    "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" +    ], +    "attributes": {}, +    "baseScore": 5, +    "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", +    "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities.", +    "category": { +     "name": "Workload", +     "id": "Cat-5" +    }, +    "scanningScope": { +     "matches": [ +      "cluster", +      "file" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0202", +    "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", +    "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", +    "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", +    "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", +    "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" +    ], +    "attributes": {}, +    "baseScore": 7, +    "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", +    "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", +    "category": { +     "name": "Workload", +     "id": "Cat-5" +    }, +    "scanningScope": { +     "matches": [ +      "cluster", +      "file" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0203", +    "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", +    "description": "Do not generally admit containers which make use of `hostPath` volumes.", +    "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node.
The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", +    "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", +    "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" +    ], +    "attributes": {}, +    "baseScore": 6, +    "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", +    "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", +    "category": { +     "name": "Workload", +     "id": "Cat-5" +    }, +    "scanningScope": { +     "matches": [ +      "cluster", +      "file" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0204", +    "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", +    "description": "Do not generally permit containers which require the use of HostPorts.", +    "long_description": "Host ports connect containers directly to the host's network. This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", +    "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", +    "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", +    "references": [ +     "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" +    ], +    "attributes": {}, +    "baseScore": 4, +    "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", +    "default_value": "By default, there are no restrictions on the use of HostPorts.", +    "category": { +     "name": "Workload", +     "id": "Cat-5" +    }, +    "scanningScope": { +     "matches": [ +      "cluster", +      "file" +     ] +    }, +    "rules": [] +   }, +   { +    "controlID": "C-0205", +    "name": "CIS-5.3.1 Ensure that the CNI in use supports Network Policies", +    "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", +    "long_description": "Kubernetes network policies are enforced by the CNI plugin in use.
As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "This will depend on the CNI plugin in use.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", + "default_value": "By default, network policies are not created.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.4.2 Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" + ], + "attributes": {}, + "baseScore": 5, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. 
You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "default_value": "By default, Kubernetes starts with four initial namespaces: 1. `default` - The default namespace for objects with no other namespace 2. `kube-system` - The namespace for objects created by the Kubernetes system 3. `kube-node-lease` - Namespace used for node heartbeats 4. `kube-public` - Namespace used for public information in a cluster", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", + "controlID": "C-0210", + "description": "Enable `docker/default` seccomp profile in your pod definitions.", + "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", + "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "manual_test": "Review the pod definitions in your cluster. They should include a section like the one below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", + "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. 
It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check pod and container security context fields against the recommendations in the CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.7.4 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112", + "C-0113", + "C-0114", + "C-0115", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0124", + "C-0125", + "C-0126", + "C-0127", + "C-0128", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0143", + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + 
"C-0149", + "C-0150", + "C-0151", + "C-0152", + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159", + "C-0160", + "C-0161", + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0192", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] + }, + { + "name": "SOC2", + "description": "SOC2 compliance related controls", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Firewall (CC6.1,CC6.6,CC7.2)", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management.", + "remediation": "Define network policies for all workloads to protect unwanted access", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [], + "long_description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management." + }, + { + "name": "Cryptographic key management - misplaced secrets (CC6.1,CC6.6,CC6.7)", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cryptographic key management - minimize access to secrets (CC6.1,CC6.6,CC6.7)", + "controlID": "C-0186", + "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Access restriction to infrastructure - admin access (CC6.1 ,CC6.2, CC6.7, CC6.8)", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Event logging (CC6.8,CC7.1,CC7.2)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers.", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers: - Logon attempts - Data deletions - Application and system errors - Changes to software and configuration settings - Changes to system files, configuration files or content files The logs are monitored by IT Operations staff and significant issues are investigated and resolved within a timely manner.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Data in motion encryption - Ingress is TLS encrypted (CC6.1,CC6.6,CC6.7)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", + "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", + "test": "Check if the Ingress resource has TLS configured", + "controlID": "C-0263", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [], + "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." + } + ], + "ControlsIDs": [ + "C-0260", + "C-0012", + "C-0186", + "C-0035", + "C-0067", + "C-0263" + ] + }, + { + "name": "MITRE", + "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Roles with delete capabilities", + "attributes": { + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve the list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Mount service principal", + "attributes": { + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CoreDNS poisoning", + "attributes": { + "microsoftMitreColumns": [ + "Lateral Movement" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", + "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", + "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", + "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", + "controlID": "C-0037", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "SSH server running inside container", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. 
This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", + "controlID": "C-0042", + "baseScore": 3.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Instance Metadata API", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Access container service account", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", + "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. 
If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. 
Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive pods in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. 
This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0007", + "C-0012", + "C-0014", + "C-0015", + "C-0020", + "C-0021", + "C-0026", + "C-0031", + "C-0035", + "C-0036", + "C-0037", + "C-0039", + "C-0042", + "C-0045", + "C-0048", + "C-0052", + "C-0053", + "C-0054", + "C-0057", + "C-0058", + "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070" + ] + }, + { + "name": "NSA", + "description": "Implement NSA security advices for K8s ", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
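The C-0012 test above looks for known sensitive key names in pod environment variables and in ConfigMaps. A sketch of the pod-side half of that check follows; the keyword list is an example only, since the real control takes its list from configuration, and the input is assumed to be a single Pod object.

    package example_c0012_sketch

    # Example keyword list; the actual control is configurable.
    sensitive_keywords = {"password", "secret", "token", "apikey"}

    deny[msg] {
        container := input.spec.containers[_]
        env := container.env[_]
        keyword := sensitive_keywords[_]
        contains(lower(env.name), keyword)
        msg := sprintf("container %v exposes a potentially sensitive variable %v", [container.name, env.name])
    }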
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. 
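The workload controls above (non-root containers, allowPrivilegeEscalation, immutable filesystem) all reduce to securityContext comparisons. A simplified sketch of two of them is below, assuming the input is a single Pod and ignoring initContainers and pod-level defaults, both of which the shipped rules do take into account.

    package example_securitycontext_sketch

    # C-0016-style check: privilege escalation not explicitly disabled.
    deny[msg] {
        container := input.spec.containers[_]
        not container.securityContext.allowPrivilegeEscalation == false
        msg := sprintf("container %v allows privilege escalation", [container.name])
    }

    # C-0017-style check: root filesystem is not read-only.
    deny[msg] {
        container := input.spec.containers[_]
        not container.securityContext.readOnlyRootFilesystem == true
        msg := sprintf("container %v has a writable root filesystem", [container.name])
    }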
Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
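For C-0034 the documented test covers both ServiceAccounts and the workloads that use them; the pod-level part of that logic can be sketched as follows (illustrative package name, single-Pod input assumed).

    package example_c0034_sketch

    # Fires when the pod does not explicitly opt out of token automounting.
    deny[msg] {
        input.kind == "Pod"
        not input.spec.automountServiceAccountToken == false
        msg := sprintf("pod %v does not disable automountServiceAccountToken", [input.metadata.name])
    }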
", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. 
And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. 
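The C-0046 test above compares added capabilities against a configurable blocklist. A sketch with an example blocklist follows; the real list is supplied through control configuration rather than hard-coded.

    package example_c0046_sketch

    # Example blocklist only; the actual control reads it from configuration.
    insecure_capabilities = {"SYS_ADMIN", "NET_ADMIN", "SYS_PTRACE", "SYS_MODULE"}

    deny[msg] {
        container := input.spec.containers[_]
        cap := container.securityContext.capabilities.add[_]
        insecure_capabilities[cap]
        msg := sprintf("container %v adds insecure capability %v", [container.name, cap])
    }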
This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
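The documented test for C-0057 is a direct field comparison, so a minimal sketch of it is short (single-Pod input assumed, illustrative package name).

    package example_c0057_sketch

    # Alert when a container is explicitly marked privileged.
    deny[msg] {
        container := input.spec.containers[_]
        container.securityContext.privileged == true
        msg := sprintf("container %v is privileged", [container.name])
    }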
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive pods in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. 
This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0005", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0270", + "C-0271" + ] + }, + { + "name": "cis-eks-t1.2.0", + "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", + "attributes": { + "armoBuiltin": true, + "version": "v1.2.0" + }, + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "2": { + "name": "Control Plane Configuration", + "id": "2", + "subSections": { + "1": { + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0067" + ] + } + } + }, + "3": { + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0183" + ] + }, + "3": { + "name": "Container Optimized OS", + "id": "3.3", + "controlsIDs": [ + "C-0226" + ] + } + } + }, + "4": { + "name": "Policies", + "id": "4", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0246" + ] + }, + "2": { + "name": "Pod Security Policies", + "id": "4.2", + 
"controlsIDs": [ + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0220" + ] + }, + "3": { + "name": "CNI Plugin", + "id": "4.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "name": "Secrets Management", + "id": "4.4", + "controlsIDs": [ + "C-0207", + "C-0234" + ] + }, + "6": { + "name": "General Policies", + "id": "4.6", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + } + } + }, + "5": { + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0221", + "C-0222", + "C-0223" + ] + }, + "2": { + "name": "Identity and Access Management (IAM)", + "id": "5.2", + "controlsIDs": [ + "C-0225" + ] + }, + "3": { + "name": "AWS EKS Key Management Service", + "id": "5.3", + "controlsIDs": [ + "C-0066" + ] + }, + "4": { + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231" + ] + }, + "5": { + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0232" + ] + }, + "6": { + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0233", + "C-0242" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS cluster creation.", + "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", + "long_description": "Kubernetes can store secrets that pods can access via a mounted volume. Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). The only requirement is to enable the encryption provider support during EKS cluster creation.\n\n Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.\n\n Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd.\n\n Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. 
This protects against attackers in the event that they manage to gain access to etcd.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [], + "manual_test": "Using the etcdctl commandline, read that secret out of etcd:\n\n \n```\netcdCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C\n\n```\n where [...] must be the additional arguments for connecting to the etcd server.\n\n Verify the stored secret is prefixed with k8s:enc:aescbc:v1: which indicates the aescbc provider has encrypted the resulting data.", + "references": [ + "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/" + ], + "impact_statement": "", + "default_value": "By default secrets created using the Kubernetes API are stored in *tmpfs* and are encrypted at rest." + }, + { + "name": "CIS-2.1.1 Enable audit Logs", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", + "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", + "long_description": "Audit logs enable visibility into all API server requests from authentic and anonymous sources. Stored log data can be analyzed manually or with tools to identify and understand anomalous or negative activity and lead to intelligent remediations.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Use approved container registries.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. 
Allowlisting only approved container registries reduces this risk.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [], + "references": [ + "https://aws.amazon.com/blogs/opensource/using-open-policy-agent-on-amazon-eks/" + ], + "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry.", + "default_value": "" + }, + { + "controlID": "C-0167", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AWS EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0171", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. Verify that the ownership is set to `root:root`", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the AWS EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0172", + "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the[Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Anonymous Authentication is not enabled. This may be configured as a command line argument to the kubelet service with `--anonymous-auth=false` or in the kubelet configuration file via `\"authentication\": { \"anonymous\": { \"enabled\": false }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with `kubectl` on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Anonymous Authentication is not enabled checking that `\"authentication\": { \"anonymous\": { \"enabled\": false }` is in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0173", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. 
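The audit methods for CIS-3.2.1 end with a JSON document describing the running kubelet configuration, as returned by the /configz proxy call. A sketch of evaluating that document in Rego is below; it assumes the response is passed in unchanged, including its kubeletconfig wrapper key.

    package example_c0172_sketch

    deny[msg] {
        input.kubeletconfig.authentication.anonymous.enabled == true
        msg := "anonymous requests to the kubelet are enabled"
    }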
Enable explicit authorization.", + "long_description": "Kubelets can be configured to allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. 
Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Webhook Authentication is enabled. This may be enabled as a command line argument to the kubelet service with `--authentication-token-webhook` or in the kubelet configuration file via `\"authentication\": { \"webhook\": { \"enabled\": true } }`.\n\n Verify that the Authorization Mode is set to `WebHook`. This may be set as a command line argument to the kubelet service with `--authorization-mode=Webhook` or in the configuration file via `\"authorization\": { \"mode\": \"Webhook }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Webhook Authentication is enabled with `\"authentication\": { \"webhook\": { \"enabled\": true } }` in the API response.\n\n Verify that the Authorization Mode is set to `WebHook` with `\"authorization\": { \"mode\": \"Webhook }` in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0174", + "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. 
Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that a client certificate authority file is configured. 
This may be configured using a command line argument to the kubelet service with `--client-ca-file` or in the kubelet configuration file via `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that a client certificate authority file is configured with `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"` in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0175", + "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. 
Unauthenticated access is provided to this read-only API, which could allow retrieval of potentially sensitive information about the cluster.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `readOnlyPort` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it be re-configured to use the main Kubelet API.", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0176", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. 
Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/pull/18552" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0177", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. 
Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/" + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "See the EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0178", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking 
options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with the pods' networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules that are too restrictive or too open.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the above command includes the argument `--make-iptables-util-chains` then verify it is set to true.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `
\"makeIPTablesUtilChains.:true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0179", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/issues/22063", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0180", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. 
The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0181", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not present or is set to true", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"rotateCertificates\": true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-certificates=true\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-certificates` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--rotate-certificates` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `rotateCertificates` setting is not present, or is set to `true`.", + "references": [ + "https://github.com/kubernetes/kubernetes/pull/41912", + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration", + "https://kubernetes.io/docs/imported/release/notes/", + "https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0183", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addresses availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-kubelet-server-certificate` executable argument verify that it is set to true.\n\n If the process does not have the `--rotate-kubelet-server-certificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists in the `featureGates` section and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://github.com/kubernetes/kubernetes/pull/45059", + "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the Amazon EKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible, replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined in each namespace in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access).\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods from system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service 
account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "https://aws.github.io/aws-eks-best-practices/iam/#disable-auto-mounting-of-service-account-tokens" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, 
bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://www.impidio.com/blog/kubernetes-rbac-security-pitfalls", + "https://raesene.github.io/blog/2020/12/12/Escalating_Away/", + "https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0205", + "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports network policies.", + "references": [ + "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", + "https://aws.github.io/aws-eks-best-practices/network/" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None.", + "default_value": "This will depend on the CNI plugin in use.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. 
Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", + "https://octetz.com/posts/k8s-network-policy-apis", + "https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "default_value": "By default, network policies are not created.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.6.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Amazon EKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "default_value": "By default, Kubernetes starts with four initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-public` - The namespace for public-readable ConfigMap\n4. `kube-node-lease` - The namespace for associated lease object for each node", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.6.2 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc.) applied to a container. 
When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment YAML. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields are set according to the recommendations in the CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.6.3 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in the default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0213", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" + ], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0214", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostPID is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0215", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostIPC is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0216", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require hostNetwork, this 
should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostNetwork is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0217", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether allowPrivilegeEscalation is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0218", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0219", + "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outside the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined. 
If a PSP is created, 'allowedCapabilities' is set by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0220", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, the use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0221", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.\n\n 1. Open the Amazon ECR console.\n2. From the navigation bar, choose the Region to create your repository in.\n3. In the navigation pane, choose Repositories.\n4. On the Repositories page, choose the repository that contains the image to scan.\n5. 
On the Images page, select the image to scan and then choose Scan.", + "manual_test": "Please follow AWS ECR or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "If you are utilizing AWS ECR, the following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageError: You may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returned: You may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", + "default_value": "Images are not scanned by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0222", + "name": "CIS-5.1.2 Minimize user access to Amazon ECR", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. 
Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0223", + "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0225", + "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. 
Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", + "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0226", + "name": "CIS-3.3.1 Prefer using a container-optimized OS when possible", + "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", + "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like a locked-down firewall are configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", + "remediation": "", + "manual_test": "If a container-optimized OS is required, examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", + "references": [ + "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", + "https://aws.amazon.com/bottlerocket/" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. ssh) may not be possible, with access and debugging being intended via a management tool.", + "default_value": "A container-optimized OS is not the default.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0227", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. 
Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. 
For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", + "default_value": "By default, Endpoint Public Access is disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0228", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "Check for private endpoint access to the Kubernetes API server", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", + "default_value": "By default, the Public Endpoint is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0229", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", + "manual_test": "", + "references": [], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0230", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", + "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", + "remediation": "", + "manual_test": "", + "references": [], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. 
This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0231", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0232", + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", + "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", + "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", + "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", + "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not, they will not be able to access the namespace or deploy.", + "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0233", + "name": "CIS-5.6.1 Consider Fargate for running untrusted workloads", + "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", + "long_description": "", + "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console.\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * For Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. 
On the Review and create page, review the information for your Fargate profile and choose Create.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "", + "default_value": "By default, AWS Fargate is not utilized.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0234", + "name": "CIS-4.4.2 Consider external secret storage", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "manual_test": "Review your secrets management implementation.", + "references": [], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0235", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified, you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. 
Verify that the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0238", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0242", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0246", + "name": "CIS-4.1.7 Avoid use of system:masters group", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. 
bootstrapping access prior to RBAC being fully available)", + "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", + "references": [ + "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", + "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0066", + "C-0067", + "C-0078", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0205", + "C-0206", + "C-0207", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0220", + "C-0221", + "C-0222", + "C-0223", + "C-0225", + "C-0226", + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231", + "C-0232", + "C-0233", + "C-0234", + "C-0235", + "C-0238", + "C-0242", + "C-0246" + ] + }, + { + "name": "cis-aks-t1.2.0", + "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", + "attributes": { + "armoBuiltin": true, + "version": "v1.2.0" + }, + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "2": { + "name": "Master (Control Plane) Configuration", + "id": "2", + "subSections": { + "1": { + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0254" + ] + } + } + }, + "3": { + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183" + ] + } + } + }, + "4": { + "name": "Policies", + "id": "4", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190" + ] + }, + "2": { + "name": "Pod Security Standards", + "id": "4.2", + "controlsIDs": [ + "C-0201", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219" + ] + }, + "3": { + 
"name": "Azure Policy / OPA", + "id": "4.3", + "controlsIDs": [] + }, + "4": { + "name": "CNI Plugin", + "id": "4.4", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "5": { + "name": "Secrets Management", + "id": "4.5", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "6": { + "name": "Extensible Admission Control", + "id": "4.6", + "controlsIDs": [] + }, + "7": { + "name": "General Policies", + "id": "4.7", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + } + } + }, + "5": { + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0243", + "C-0250", + "C-0251" + ] + }, + "2": { + "name": "Access and identity options for Azure Kubernetes Service (AKS)", + "id": "5.2", + "controlsIDs": [ + "C-0239", + "C-0241" + ] + }, + "3": { + "name": "Key Management Service (KMS)", + "id": "5.3", + "controlsIDs": [ + "C-0244" + ] + }, + "4": { + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0240", + "C-0245", + "C-0247", + "C-0248", + "C-0252" + ] + }, + "5": { + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0088" + ] + }, + "6": { + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0242", + "C-0249" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Use approved container registries.", + "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", + "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [], + "references": [ + "\n\n \n\n " + ], + "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry." + }, + { + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. 
You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based on a user's identity or group membership.", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [], + "references": [ + "\n\n " + ] + }, + { + "controlID": "C-0167", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0171", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. Verify that the ownership is set to `root:root`", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0172", + "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n First, SSH to the relevant 
node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": { \"anonymous\": { \"enabled\": false }` argument is set to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"anonymous\":{\"enabled\":false}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0173", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\"... 
\"webhook\":{\"enabled\":true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"authentication\": \"webhook\": \"enabled\"` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": {\"webhook\": { \"enabled\": is set to true`.\n\n If the `\"authentication\": {\"mode\": {` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `\"authentication\": {\"mode\": {` to something other than `AlwaysAllow`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"webhook\":{\"enabled\":true}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0174", + "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry 
for `\"x509\": {\"clientCAFile:\"` set to the location of the client certificate authority file.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"x509\": {\"clientCAFile:\"` argument exists and is set to the location of the client certificate authority file.\n\n If the `\"x509\": {\"clientCAFile:\"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `\"authentication\": { \"x509\": {\"clientCAFile:\"` to the location of the client certificate authority file.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication.. x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0175", + "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. 
Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\nreadOnlyPort to 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `readOnlyPort` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it be re-configured to use the main Kubelet API.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0176", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. 
Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0177", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. 
Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0178", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. 
It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"makeIPTablesUtilChains\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0179", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 3, + "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", + "default_value": "See the Azure AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0180", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "See the AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0182", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", + "references": [ + "\n\n \n\n \n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0183", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AKS documentation for the default value.", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used 
where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to 
that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0201", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", +        "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", +        "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", +        "references": [ +          "\n\n \n\n " +        ], +        "attributes": {}, +        "baseScore": 5, +        "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", +        "default_value": "By default, PodSecurityPolicies are not defined.", +        "category": { +          "name": "Workload", +          "id": "Cat-5" +        }, +        "scanningScope": { +          "matches": [ +            "cluster", +            "file" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0205", +        "name": "CIS-4.4.1 Ensure latest CNI version is used", +        "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", +        "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", +        "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", +        "manual_test": "Ensure CNI plugin supports network policies.", +        "references": [ +          "\n\n " +        ], +        "attributes": {}, +        "baseScore": 4, +        "impact_statement": "None.", +        "default_value": "This will depend on the CNI plugin in use.", +        "category": { +          "name": "Network", +          "id": "Cat-4" +        }, +        "scanningScope": { +          "matches": [ +            "cluster" +          ] +        }, +        "rules": [] +      }, +      { +        "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", +        "controlID": "C-0206", +        "description": "Use network policies to isolate traffic in your cluster network.", +        "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "default_value": "By default, network policies are not created.", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.5.2 Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", +        "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", +        "impact_statement": "None", +        "default_value": "By default, no external secret management is configured.", +        "manual_test": "Review your secrets management implementation.", +        "test": "Checking encryption configuration to see if secrets are managed externally by KMS using AWS, Azure, or Akeyless Vault", +        "references": [ +          "" +        ], +        "attributes": {}, +        "baseScore": 5, +        "category": { +          "name": "Control plane", +          "id": "Cat-1" +        }, +        "scanningScope": { +          "matches": [ +            "cluster" +          ] +        }, +        "rules": [] +      }, +      { +        "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", +        "controlID": "C-0209", +        "description": "Use namespaces to isolate your Kubernetes objects.", +        "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Azure AKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", +        "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", +        "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", +        "test": "Lists all namespaces in cluster for user to review", +        "references": [ +          "\n\n \n\n \n\n ." +        ], +        "attributes": {}, +        "baseScore": 5, +        "impact_statement": "You need to switch between namespaces for administration.", +        "category": { +          "name": "Workload", +          "id": "Cat-5" +        }, +        "default_value": "When you create an AKS cluster, the following namespaces are available:\n\n NAMESPACES\nNamespace Description\ndefault Where pods and deployments are created by default when none is provided. In smaller environments, you can deploy applications directly into the default namespace without creating additional logical separations. When you interact with the Kubernetes API, such as with kubectl get pods, the default namespace is used when none is specified.\nkube-system Where core resources exist, such as network features like DNS and proxy, or the Kubernetes dashboard. You typically don't deploy your own applications into this namespace.\nkube-public Typically not used, but can be used for resources to be visible across the whole cluster, and can be viewed by any user.", +        "scanningScope": { +          "matches": [ +            "cluster" +          ] +        }, +        "rules": [] +      }, +      { +        "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", +        "controlID": "C-0211", +        "description": "Apply Security Context to Your Pods and Containers", +        "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc.) applied to a container. 
When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. 
It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.7.3 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get all -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0213", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n as an alternative AZ CLI can be used:\n\n \n```\naz aks list --output yaml\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an AKS cluster, the value of \"enablePodSecurityPolicy\" is null.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0214", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0215", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement 
to run containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", +        "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", +        "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", +        "references": [ +          "\n\n " +        ], +        "attributes": {}, +        "baseScore": 5.0, +        "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", +        "default_value": "By default, PodSecurityPolicies are not defined.", +        "scanningScope": { +          "matches": [ +            "cluster", +            "file" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0216", +        "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", +        "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", +        "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", +        "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", +        "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", +        "references": [ +          "\n\n " +        ], +        "attributes": {}, +        "baseScore": 5.0, +        "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", +        "default_value": "By default, PodSecurityPolicies are not defined.", +        "scanningScope": { +          "matches": [ +            "cluster", +            "file" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0217", +        "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", +        "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", +        "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", +        "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", +        "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", +        "references": [ +          "\n\n " +        ], +        "attributes": {}, +        "baseScore": 6.0, +        "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", +        "default_value": "By default, PodSecurityPolicies are not defined.", +        "scanningScope": { +          "matches": [ +            "cluster", +            "file" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0218", +        "name": "CIS-4.2.6 Minimize the admission of root containers", +        "description": "Do not generally permit containers to be run as the root user.", +        "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", +        "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", +        "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", +        "references": [ +          "\n\n " +        ], +        "attributes": {}, +        "baseScore": 6.0, +        "impact_statement": "Pods with containers which run as the root user will not be permitted.", +        "default_value": "By default, PodSecurityPolicies are not defined.", +        "scanningScope": { +          "matches": [ +            "cluster", +            "file" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0219", +        "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", +        "description": "Do not generally permit containers with capabilities assigned beyond the default set.", +        "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0235", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0238", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. 
You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0239", + "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. 
The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. Webhook token authentication is configured and managed as part of the AKS cluster.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0240", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0241", + "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", + "remediation": "Set Azure RBAC as access system.", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0242", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0243", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. 
This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. Or, a network issue might prevent you from connecting to the registry.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0244", + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0245", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0247", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. 
You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", + "default_value": "By default, Endpoint Private Access is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0248", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "manual_test": "", + "references": [ + "" + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", +        "default_value": "", +        "scanningScope": { +          "matches": [ +            "AKS" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0249", +        "name": "CIS-5.6.1 Restrict untrusted workloads", +        "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", +        "long_description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", +        "remediation": "", +        "manual_test": "", +        "references": [ +          "\n\n \n\n " +        ], +        "attributes": { +          "actionRequired": "manual review" +        }, +        "baseScore": 5, +        "impact_statement": "", +        "default_value": "ACI is not a default component of the AKS", +        "scanningScope": { +          "matches": [ +            "AKS" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0250", +        "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", +        "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", +        "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. 
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", +        "remediation": "", +        "manual_test": "", +        "references": [ +          "" +        ], +        "attributes": {}, +        "baseScore": 6, +        "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", +        "default_value": "", +        "scanningScope": { +          "matches": [ +            "AKS" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0251", +        "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", +        "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", +        "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", +        "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", +        "manual_test": "", +        "references": [ +          "" +        ], +        "attributes": {}, +        "baseScore": 6, +        "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", +        "default_value": "", +        "scanningScope": { +          "matches": [ +            "cluster" +          ] +        }, +        "rules": [] +      }, +      { +        "controlID": "C-0252", +        "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", +        "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", +        "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": {}, + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0254", + "name": "CIS-2.1.1 Enable audit Logs", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. 
All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", + "default_value": "By default, cluster control plane logs aren't sent to be Logged.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0078", + "C-0088", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0201", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0235", + "C-0238", + "C-0239", + "C-0240", + "C-0241", + "C-0242", + "C-0243", + "C-0244", + "C-0245", + "C-0247", + "C-0248", + "C-0249", + "C-0250", + "C-0251", + "C-0252", + "C-0254" + ] + }, + { + "name": "ArmoBest", + "description": "", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. 
Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. 
It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Network mapping", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. 
Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. 
This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the pods running in the default namespace.",
+ "test": "Check that there are no pods in the 'default' namespace",
+ "controlID": "C-0061",
+ "category": {
+ "name": "Workload",
+ "id": "Cat-5"
+ },
+ "baseScore": 3.0,
+ "scanningScope": {
+ "matches": [
+ "cluster",
+ "file"
+ ]
+ },
+ "rules": []
+ },
+ {
+ "name": "Sudo in container entrypoint",
+ "attributes": {
+ "controlTypeTags": [
+ "security"
+ ]
+ },
+ "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.",
+ "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.",
+ "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.",
+ "test": "Check that there is no 'sudo' in the container entrypoint",
+ "controlID": "C-0062",
+ "baseScore": 5.0,
+ "example": "@controls/examples/c062.yaml",
+ "category": {
+ "name": "Workload",
+ "id": "Cat-5"
+ },
+ "scanningScope": {
+ "matches": [
+ "cluster",
+ "file"
+ ]
+ },
+ "rules": []
+ },
+ {
+ "name": "Portforwarding privileges",
+ "attributes": {
+ "rbacQuery": "Port Forwarding",
+ "controlTypeTags": [
+ "security-impact",
+ "compliance"
+ ]
+ },
+ "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.",
+ "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.",
+ "long_description": "Attackers who have relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.",
+ "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.",
+ "controlID": "C-0063",
+ "baseScore": 5.0,
+ "example": "@controls/examples/c063.yaml",
+ "category": {
+ "name": "Access control",
+ "id": "Cat-2"
+ },
+ "scanningScope": {
+ "matches": [
+ "cluster",
+ "file"
+ ]
+ },
+ "rules": []
+ },
+ {
+ "name": "No impersonation",
+ "attributes": {
+ "rbacQuery": "Impersonation",
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. 
This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive pods in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. 
This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. 
Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-3172-aggregated-API-server-redirect", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [] + }, + "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. 
This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties",
+ "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172",
+ "controlID": "C-0089",
+ "baseScore": 3.0,
+ "example": "",
+ "category": {
+ "name": "Workload",
+ "id": "Cat-5"
+ },
+ "scanningScope": {
+ "matches": [
+ "cluster"
+ ]
+ },
+ "rules": []
+ },
+ {
+ "name": "CVE-2022-47633-kyverno-signature-bypass",
+ "attributes": {
+ "controlTypeTags": [
+ "security"
+ ]
+ },
+ "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno that enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy",
+ "remediation": "Update your Kyverno to 1.8.5 or above",
+ "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno that enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.",
+ "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)",
+ "controlID": "C-0091",
+ "baseScore": 8.0,
+ "example": "",
+ "category": {
+ "name": "Workload",
+ "id": "Cat-5"
+ },
+ "scanningScope": {
+ "matches": [
+ "cluster",
+ "file"
+ ]
+ },
+ "rules": []
+ },
+ {
+ "controlID": "C-0236",
+ "name": "Verify image signature",
+ "description": "Verifies the signature of each image with given public keys",
+ "long_description": "Verifies the signature of each image with given public keys",
+ "remediation": "Replace the image with an image that is signed correctly",
+ "manual_test": "",
+ "references": [],
+ "attributes": {
+ "actionRequired": "configuration"
+ },
+ "baseScore": 7,
+ "impact_statement": "",
+ "default_value": "",
+ "category": {
+ "name": "Workload",
+ "subCategory": {
+ "name": "Supply chain",
+ "id": "Cat-6"
+ },
+ "id": "Cat-5"
+ },
+ "scanningScope": {
+ "matches": [
+ "cluster",
+ "file"
+ ]
+ },
+ "rules": []
+ },
+ {
+ "controlID": "C-0237",
+ "name": "Check if signature exists",
+ "description": "Ensures that all images contain some signature",
+ "long_description": "Verifies that each image is signed",
+ "remediation": "Replace the image with a signed image",
+ "manual_test": "",
+ "references": [],
+ "attributes": {},
+ "baseScore": 7,
+ "impact_statement": "",
+ "default_value": "",
+ "category": {
+ "name": "Workload",
+ "subCategory": {
+ "name": "Supply chain",
+ "id": "Cat-6"
+ },
+ "id": "Cat-5"
+ },
+ "scanningScope": {
+ "matches": [
+ "cluster",
+ "file"
+ ]
+ },
+ "rules": []
+ },
+ {
+ "name": "Ensure CPU limits are set",
+ "attributes": {
+ "controlTypeTags": [
+ "compliance",
+ "devops",
+ "security"
+ ],
+ "attackTracks": [
+ {
+ "attackTrack": "service-destruction",
+ "categories": [
+ "Denial of service"
+ ]
+ 
} + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0005", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0049", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0078", + "C-0079", + "C-0081", + "C-0087", + "C-0089", + "C-0091", + "C-0236", + "C-0237", + "C-0270", + "C-0271" + ] + } +] \ No newline at end of file diff --git a/releaseDev/mitre.json b/releaseDev/mitre.json new file mode 100644 index 000000000..06fca2d7d --- /dev/null +++ b/releaseDev/mitre.json @@ -0,0 +1,2112 @@ +{ + "name": "MITRE", + "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Roles with delete capabilities", + "attributes": { + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy 
data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-excessive-delete-rights-v1", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind 
== subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-access-dashboard-subject-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" + }, + { + "name": "rule-access-dashboard-wl-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, 
\"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. 
", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Mount service principal", + "attributes": { + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-mount-potential-credentials-paths", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "relevantCloudProviders": [ + "EKS", + "GKE", + "AKS" + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tstart_of_path := volumes_data[\"start_of_path\"]\n result := is_unsafe_paths(volume, start_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"start_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"start_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# 
is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, start_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [start_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" + } + ] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. 
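A hypothetical example of what that test would catch: a Service of type LoadBalancer whose selector matches a workload whose name contains one of the configured sensitive-interface strings (e.g. "kubeflow"). All names and labels are illustrative.

apiVersion: v1
kind: Service
metadata:
  name: kubeflow-ui                   # hypothetical; assumes a workload named *kubeflow* behind it
  namespace: kubeflow
spec:
  type: LoadBalancer
  selector:
    app: kubeflow-ui                  # must match the workload's labels for the rule to connect them
  ports:
  - port: 80
    targetPort: 8080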
Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "exposed-sensitive-interfaces-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.sensitiveInterfaces" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveInterfaces", + "name": "Sensitive interfaces", + "description": "List of known software interfaces that should not generally be exposed to the Internet." + } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload 
connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" + } + ] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. 
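By way of illustration (the control simply lists such objects for review), a minimal CronJob; the name, schedule and image are hypothetical:

apiVersion: batch/v1
kind: CronJob
metadata:
  name: nightly-report                # hypothetical
spec:
  schedule: "0 2 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: report
            image: registry.example.com/report:latest   # hypothetical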
This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rule-deny-cronjobs", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob" + }, + "ruleLanguage": "rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if it's cronjob", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# alert cronjobs\n\n# handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
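For illustration, RBAC of the shape this control reports grants delete/deletecollection on events; the role name is hypothetical.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: event-cleaner                 # hypothetical
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["delete", "deletecollection"]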
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-delete-k8s-events-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for 
malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-validating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Validate admission controller" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns validating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CoreDNS poisoning", + "attributes": { + "microsoftMitreColumns": [ + "Lateral Movement" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", + "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", + "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. 
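As a hypothetical illustration of what the rule below alerts on, a Role that allows patching the coredns ConfigMap (the role name is made up):

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: coredns-editor                # hypothetical
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["coredns"]
  verbs: ["update", "patch"]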
If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", + "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", + "controlID": "C-0037", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-update-configmap-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can update/patch the 'coredns' configmap", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", 
\"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-mutating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Validate admission controller" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns mutating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "SSH server running inside container", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. 
This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", + "controlID": "C-0042", + "baseScore": 3.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-ssh-to-pod-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := 
input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": 
wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n}" + } + ] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = 
sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" + } + ] + }, + { + "name": "Instance Metadata API", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "instance-metadata-api-access", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "cloudProviderInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Checks if there is access from the nodes to cloud prividers instance metadata services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" + } + ] + }, + { + "name": "Access container service account", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", + "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. 
Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "access-container-service-account-v1", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace 
== subject.namespace\n}" + } + ] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. 
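For the cluster internal networking control above, the internal-networking rule only asks whether any NetworkPolicy exists in each namespace. A common starting point that satisfies it is a namespace-wide default-deny policy such as the sketch below (the namespace name is a placeholder); real deployments then add narrower allow policies on top of it.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-namespace         # placeholder namespace
spec:
  podSelector: {}                 # selects every pod in the namespace
  policyTypes:
  - Ingress
  - Egress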
This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n 
}\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
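The rule for this CVE control, shown below, reports containers whose volumeMounts entries set subPath when a node in the cluster runs a vulnerable kubelet version. For illustration, a mount of the following shape is what gets flagged; all names here are placeholders.

apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo                  # placeholder name
spec:
  containers:
  - name: app
    image: registry.example/app:1.0   # placeholder image
    volumeMounts:
    - name: data
      mountPath: /data/site
      subPath: site                   # the field the rule's is_sub_path_container looks for
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: data-pvc             # placeholder claim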
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + } + ] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + 
"controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + } + ] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
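For native clusters, the etcd-encryption-native rule above only verifies that the API server command line contains --encryption-provider-config; the file that flag points to is an EncryptionConfiguration object. A hedged sketch of such a file is below; the key name and secret are placeholders and a real key must be generated per cluster.

apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
  - secrets
  providers:
  - aescbc:
      keys:
      - name: key1
        secret: <BASE64_ENCODED_32_BYTE_KEY>   # placeholder, do not use literally
  - identity: {}

The API server is then started with --encryption-provider-config pointing at this file, which is the flag the rule's fix path adds.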
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
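The k8s-audit-logs-enabled-native rule above only checks that the API server command line includes --audit-policy-file. On self-managed clusters this usually means supplying an audit Policy file alongside a log destination; a minimal sketch is below, with placeholder paths, and is not taken from the rule itself.

# referenced via --audit-policy-file=/etc/kubernetes/audit-policy.yaml (placeholder path)
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata             # baseline: record request metadata for all requests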
It is an important to use PSP to control the creation of sensitive pods in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "psp-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading 
the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0007", + "C-0012", + "C-0014", + "C-0015", + "C-0020", + "C-0021", + "C-0026", + "C-0031", + "C-0035", + "C-0036", + "C-0037", + "C-0039", + "C-0042", + "C-0045", + "C-0048", + "C-0052", + "C-0053", + "C-0054", + "C-0057", + "C-0058", + "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070" + ] +} \ No newline at end of file diff --git a/releaseDev/nsa.json b/releaseDev/nsa.json new file mode 100644 index 000000000..2c1a47229 --- /dev/null +++ b/releaseDev/nsa.json @@ -0,0 +1,2096 @@ +{ + "name": "NSA", + "description": "Implement NSA security advices for K8s ", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to 
gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "insecure-port-flag", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. 
This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + } + ] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = 
[]\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", 
[container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + } + ] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ingress-and-egress-blocked", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, 
networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" +                } +            ] +        }, +        { +            "name": "Automatic mapping of service account", +            "attributes": { +                "controlTypeTags": [ +                    "security", +                    "compliance", +                    "smartRemediation" +                ] +            }, +            "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", +            "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", +            "long_description": "Potential attackers may gain access to a pod and steal its service account token. It is therefore recommended to disable automatic mapping of service account tokens in the service account configuration and to enable it only for pods that need to use them.", +            "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, 
metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource) can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply the least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in highly privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high-privilege roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := 
array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": 
[path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same hostPort, they cannot be deployed to the same node. This may prevent the second object from starting, even though Kubernetes will try to reschedule it on another node, provided there are available nodes with a sufficient amount of resources. Also, if the number of replicas of such a workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define an appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "This control flags workloads (pods, deployments, etc.) that contain a container with a hostPort. The problem is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same hostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods: Kubernetes reschedules them to a different node if available.", + "test": "Check for each workload whether any of its containers defines a hostPort.", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container; if necessary, use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := 
wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. 
You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." + } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. ", + "test": "Check whether AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. 
If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "linux-hardening", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = 
containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as a network policy or Seccomp, and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example) can get access to the host\u2019s resources.", + "test": "Check in the Pod spec if securityContext.privileged == true; if so, raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments are defined as privileged", + "remediation": "Avoid defining pods as privileged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": 
path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + } + ] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + 
"controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + } + ] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n"
+ },
+ {
+ "name": "k8s-audit-logs-enabled-native",
+ "attributes": {
+ "resourcesAggregator": "apiserver-pod",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}"
+ }
+ ]
+ },
+ {
+ "name": "PSP enabled",
+ "attributes": {
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "description": "PSP enables fine-grained authorization of pod creation, and it is important to enable it",
+ "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans",
+ "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates, and they extend authorization beyond RBAC. 
It is important to use PSP to control the creation of sensitive pods in your cluster.",
+ "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled",
+ "controlID": "C-0068",
+ "baseScore": 1.0,
+ "category": {
+ "name": "Control plane",
+ "id": "Cat-1"
+ },
+ "scanningScope": {
+ "matches": [
+ "cluster",
+ "file"
+ ]
+ },
+ "rules": [
+ {
+ "name": "psp-enabled-cloud",
+ "attributes": {},
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "container.googleapis.com",
+ "eks.amazonaws.com"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "ClusterDescribe"
+ ]
+ }
+ ],
+ "relevantCloudProviders": [
+ "EKS",
+ "GKE"
+ ],
+ "ruleDependencies": [],
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}"
+ },
+ {
+ "name": "psp-enabled-native",
+ "attributes": {
+ "resourcesAggregator": "apiserver-pod",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}"
+ }
+ ]
+ },
+ {
+ "name": "Disable anonymous access to Kubelet service",
+ "attributes": {
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.",
+ "remediation": "Start the kubelet with the --anonymous-auth=false flag.",
+ "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.",
+ "test": "Reading 
the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-cpu-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": 
sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", 
\"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0005", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0270", + "C-0271" + ] +} \ No newline at end of file diff --git a/releaseDev/rules.json b/releaseDev/rules.json new file mode 100644 index 000000000..0535100f8 --- /dev/null +++ b/releaseDev/rules.json @@ -0,0 +1,8856 @@ +[ + { + "name": "outdated-k8s-version", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\tnode := input[_]\n\tnode.kind == \"Node\"\n\tcurrent_version := node.status.nodeInfo.kubeletVersion\n has_outdated_version(current_version)\n\tpath := \"status.nodeInfo.kubeletVersion\"\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Your kubelet version: %s, in node: %s is outdated\", [current_version, node.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [node]},\n\t}\n}\n\n\nhas_outdated_version(version) {\n\t# the `supported_k8s_versions` is validated in the validations script against \"https://api.github.com/repos/kubernetes/kubernetes/releases\"\n supported_k8s_versions := [\"v1.29\", \"v1.28\", \"v1.27\"] \n\tevery v in supported_k8s_versions{\n\t\tnot startswith(version, v)\n\t}\n}\n" + }, + { + "name": "ensure-aws-policies-are-present", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "fails if aws policies are not found", + "remediation": "Implement policies to minimize user access to Amazon ECR", + "ruleQuery": "armo_builtins", + "rule": "package 
armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has no policies to minimize access to Amazon ECR; add a policy in order to minimize access to it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n"
+ },
+ {
+ "name": "kubelet-hostname-override",
+ "attributes": {
+ "hostSensorRule": "true"
+ },
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "hostdata.kubescape.cloud"
+ ],
+ "apiVersions": [
+ "v1beta0"
+ ],
+ "resources": [
+ "KubeletInfo"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "description": "Ensure that the --hostname-override argument is not set.",
+ "remediation": "Unset the --hostname-override argument.",
+ "ruleQuery": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n"
+ },
+ {
+ "name": "exposed-critical-pods",
+ "attributes": {
+ "m$K8sThreatMatrix": "exposed-critical-pods",
+ "imageScanRelated": true
+ },
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Service",
+ "Pod"
+ ]
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "armo.vuln.images",
+ "image.vulnscan.com"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "ImageVulnerabilities"
+ ]
+ }
+ ],
+ "description": "Fails if pods have exposed services as well as critical vulnerabilities",
+ "remediation": "The image of the listed pods might have a fix in a newer version. 
Alternatively, the pod service might not need to be external facing", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n\n container.image == vuln.metadata.name\n\n # At least one critical vulnerabilities\n filter_critical_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_critical_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.severity == \"Critical\"\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", + "resourceEnumerator": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n 
service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}" + }, + { + "name": "rule-manual", + "attributes": { + "actionRequired": "manual review", + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" + }, + { + "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable certificate based kubelet authentication.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
\"kube-apiserver\")\n}\n" + }, + { + "name": "resources-memory-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact 
Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "configmap-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "podtemplate-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], 
+ "apiVersions": [ + "v1" + ], + "resources": [ + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "external-secret-storage", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" + }, + { + "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", + "attributes": { + "useFromKubescapeVersion": "v2.2.5" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\t# node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy 
in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := [\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n" + }, + { + "name": "rule-excessive-delete-rights-v1", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "host-pid-ipc-privileges", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + }, + { + "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "kubelet-strong-cryptographics-ciphers", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", + "remediation": "Change the --tls-cipher-suites value or the TLSCipherSuites property of the config file to use strong cryptographic ciphers", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic 
ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [\"TLSCipherSuites\"],\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "workload-mounted-secrets", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Secret" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Set global request timeout for API server requests as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n" + }, + { + "name": "ensure-external-secrets-storage-is-in-use", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "relevantCloudProviders": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that doesn't support external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resources\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for 
\"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n" + }, + { + "name": "CVE-2022-47633", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Limit the rate at which the API server accepts requests.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "workload-with-cluster-takeover-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has cluster takeover roles\n role := input[_]\n role.kind in [\"Role\", 
\"ClusterRole\"]\n is_takeover_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has cluster takeover roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n \"object\": rolebinding,\n\t\t \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\n# look for rule allowing create/update workloads\nis_takeover_role(role){\n takeover_resources := [\"pods\", \"*\"]\n takeover_verbs := [\"create\", \"update\", \"patch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}\n\n# look for rule allowing secret access\nis_takeover_role(role){\n rule := role.rules[i]\n takeover_resources := [\"secrets\", \"*\"]\n takeover_verbs := [\"get\", \"list\", \"watch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}", + "resourceEnumerator": "package 
armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" + }, + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n    startswith(tag, \"v\")\n    tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n    startswith(tag, \"v\")\n    tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n    startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n    startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n    not startswith(tag, \"v\")\n    tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n    not startswith(tag, \"v\")\n    tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n    not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n    not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n    configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n    reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n    version := regex.find_all_string_submatch_n(reg, image, -1)\n    v := version[_]\n    img := v[_]\n    not endswith(img, \"/\")\n}" + }, + { + "name": "instance-metadata-api-access", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "cloudProviderInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Checks if there is access from the nodes to the cloud provider's instance metadata services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to the Instance Metadata Service of the cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": 
\"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" + }, + { + "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token cannot be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": 
"package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + }, + { + "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "kubelet-rotate-kubelet-server-certificate", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n" + }, + { + "name": "pods-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + 
], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "CVE-2022-23648", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n 
containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" + }, + { + "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "remediation": "Enable Azure Defender image scanning. 
Command: az aks update --enable-defender --resource-group --name ", + "ruleQuery": "armo_builtin", + "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n" + }, + { + "name": "container-image-repository-v1", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" + }, + { + "name": "psp-deny-hostipc", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "rule-can-ssh-to-pod-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + 
"apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# input: 
pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" + }, + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container 
images are from repositories explicitly allowed in this list." + } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + }, + { + "name": "psp-required-drop-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport 
future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return al the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n" + }, + { + "name": "set-seccomp-profile", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "relevantCloudProviders": [ + "EKS" + ], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := \"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := \"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := \tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n" + }, + { + "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": 
[],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + }, + { + "name": "alert-mount-potential-credentials-paths", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "relevantCloudProviders": [ + "EKS", + "GKE", + "AKS" + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tstart_of_path := volumes_data[\"start_of_path\"]\n result := is_unsafe_paths(volume, start_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"start_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"start_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, start_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [start_of_path, i])\n}\n\n\n# fix_path - 
adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" + }, + { + "name": "service-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": 
result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" + }, + { + "name": "sudo-in-container-entrypoint", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := 
wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, start_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" + }, + { + "name": "container-hostPort", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 
10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + }, + { + "name": "replicationcontroller-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "kubelet-protect-kernel-defaults", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the --protect-kernel-defaults argument is set to true.", + "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Reject creating objects in a namespace that is undergoing termination.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "namespace-without-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := 
input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "exposure-to-internet", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n\t\t \"reviewPaths\": failPath,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n\n # Make sure that they belong to the same namespace\n svc.metadata.namespace == ingress.metadata.namespace\n\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t\t{\n\t \"object\": ingress,\n\t\t \"reviewPaths\": result,\n\t \"failedPaths\": result,\n\t },\n\t\t{\n\t \"object\": svc,\n\t\t}\n ]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == 
wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" + }, + { + "name": "resources-cpu-limit-and-request", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.cpu_request_max", + "settings.postureControlInputs.cpu_request_min", + "settings.postureControlInputs.cpu_limit_min", + "settings.postureControlInputs.cpu_limit_max" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.cpu_request_max", + "name": "cpu_request_max", + "description": "Ensure a CPU resource request is set and is under this defined maximum value." + }, + { + "path": "settings.postureControlInputs.cpu_request_min", + "name": "cpu_request_min", + "description": "Ensure a CPU resource request is set and is above this defined minimum value." + }, + { + "path": "settings.postureControlInputs.cpu_limit_max", + "name": "cpu_limit_max", + "description": "Ensure a CPU resource limit is set and is under this defined maximum value." + }, + { + "path": "settings.postureControlInputs.cpu_limit_min", + "name": "cpu_limit_min", + "description": "Ensure a CPU resource limit is set and is above this defined minimum value." 
+ } + ], + "description": "CPU limits and requests are not set.", + "remediation": "Ensure CPU limits and requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ==================================== no CPU requests =============================================\n# Fails if pod does not have container with CPU request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU requests\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU requests\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n# ============================================= cpu limits exceed min/max =============================================\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n \tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# ============================================= cpu requests exceed min/max =============================================\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################################################\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resources.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) 
{\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tto_number(given) > to_number(max)\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tto_number(given) < to_number(min)\n\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" + }, + { + "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := 
object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "review-roles-with-aws-iam-authenticator", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n" + }, + { + "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "CVE-2022-3172", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apiregistration.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "APIService" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "apiserverinfo.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", + "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n" + }, + { + "name": "image-pull-policy-is-not-set-to-always", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + 
"v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "check imagePullPolicy filed, if imagePullPolicy = always pass, else fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n 
v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" + }, + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny addmission controller is not enabled. This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "psp-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + 
"apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + }, + { + "name": "psp-deny-root-container", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" + }, + { + "name": "etcd-peer-tls-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "linux-hardening", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + }, + { + "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "kubelet-event-qps", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Retain the logs for at least 30 days or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, \"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-access-dashboard-subject-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": 
"v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw 
permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), 
format_int(k, 10)])\n fix_path = \"\"\n}" + }, + { + "name": "rule-cni-enabled-aks", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n" + }, + { + "name": "host-network-access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + }, + { + "name": "rule-identify-blocklisted-image-registries", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.publicRegistries", + "settings.postureControlInputs.untrustedRegistries" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.publicRegistries", + "name": "Public registries", + "description": "Kubescape checks none of these public container registries are in use." + }, + { + "path": "settings.postureControlInputs.untrustedRegistries", + "name": "Registries block list", + "description": "Kubescape checks none of these user-provided container registries are in use." 
+ } + ], + "description": "Identifying if pod container images are from unallowed registries", + "remediation": "Use images from safe registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\tregistry := untrusted_registries[_]\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\tregistry := public_registries[_]\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) = result {\n not contains(image, \"/\")\n result := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + }, + { + "name": "rule-identify-blocklisted-image-registries-v1", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + 
"useFromKubescapeVersion": "v2.9.0" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.publicRegistries", + "settings.postureControlInputs.untrustedRegistries" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.publicRegistries", + "name": "Public registries", + "description": "Kubescape checks none of these public container registries are in use." + }, + { + "path": "settings.postureControlInputs.untrustedRegistries", + "name": "Registries block list", + "description": "Kubescape checks none of these user-provided container registries are in use." + } + ], + "description": "Identifying if pod container images are from unallowed registries", + "remediation": "Use images from safe registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tuntrusted_or_public_registries(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\tregistry := untrusted_registries[_]\n\tstartswith(image, registry)\n\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\tregistry := public_registries[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}" + }, + { + "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not allow all requests.", + "remediation": "Edit the API server pod 
specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "horizontalpodautoscaler-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "rule-secrets-in-env-var", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "psp-deny-privileged-container", + 
"attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\twanted = 
[\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": 
"Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "ensure-azure-rbac-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", + "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. 
Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled check if Azure RBAC is enabled into ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n" + }, + { + "name": "set-procmount-default", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := 
input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" + }, + { + "name": "exposed-rce-pods", + "attributes": { + "m$K8sThreatMatrix": "exposed-rce-pods", + "useFromKubescapeVersion": "v2.0.150", + "imageScanRelated": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service", + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "armo.vuln.images", + "image.vulnscan.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ImageVulnerabilities" + ] + } + ], + "description": "fails if known pods have exposed services and known vulnerabilities with remote code execution", + "remediation": "The image of the listed pods might have a fix in a newer version. 
Alternatively, the pod service might not need to be external facing", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # At least one rce vulnerability\n filter_rce_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n\t\t\"reviewPaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_rce_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.categories.isRce == true\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", + "resourceEnumerator": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ; x.apiVersion == \"v1\"]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ; x.apiVersion == \"v1\"]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"] # TODO: x.apiVersion == \"--input--\" || x.apiVersion == \"--input--\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on 
the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}" + }, + { + "name": "restrict-access-to-the-control-plane-endpoint", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n" + }, + { + "name": "pod-security-admission-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-default-service-accounts-has-only-default-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"deletePaths\": [sprintf(\"subjects[%d]\", [i])],\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "etcd-unique-ca", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := split(command, \"=\")\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n" + 
}, + { + "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n" + }, + { + "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "ruleQuery": "armo_builtins", + "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private 
endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n" + }, + { + "name": "pod-security-admission-restricted-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package 
armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess in enabled on a private node for EKS. 
A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" + }, + { + "name": "resources-cpu-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", 
[format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" + }, + { + "name": "verify-image-signature", + "attributes": { + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "ruleQuery": "armo_builtins", + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.trustedCosignPublicKeys", + "name": "Trusted Cosign public keys", + "description": "A list of trusted Cosign public keys that are used for validating container image signatures." + } + ], + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.containers[%v].image\", [i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": 
[path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + }, + { + "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "pod-security-admission-restricted-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "workload-with-administrative-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has administrative roles\n role := input[_]\n role.kind in [\"Role\", \"ClusterRole\"]\n is_administrative_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has administrative roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n \"object\": rolebinding,\n\t\t \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n 
wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\nis_administrative_role(role){\n administrative_resources := [\"*\"]\n administrative_verbs := [\"*\"]\n administrative_api_groups := [\"\", \"*\"]\n \n administrative_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in administrative_resources ; \n rule.verbs[b] in administrative_verbs ; \n rule.apiGroups[c] in administrative_api_groups]\n count(administrative_rule) > 0\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" + }, + { + "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + 
"description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": 
\"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" + }, + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := 
input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Use individual service account credentials for each controller.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. 
When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, 
false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "ensure-clusters-are-created-with-private-nodes", + "attributes": { + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. 
Private Nodes are nodes with no public IP addresses.", + "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n" + }, + { + "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" + }, + { + "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n" + }, + { + "name": "etcd-auto-tls-disabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Do not use self-signed certificates for TLS.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. 
Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "set-sysctls-params", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.sysctls is not set.", + "remediation": "Set securityContext.sysctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not disable the secure port.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable kubelet server certificate rotation on controller-manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` 
is set to \"true\" this recommendation verifies that it has not been disabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := 
yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "CVE-2022-0185", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "LinuxKernelVariables" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n\n parsed_kernel_version_arr := parse_kernel_version_to_array(node.status.nodeInfo.kernelVersion)\n is_azure := parsed_kernel_version_arr[4] == \"azure\"\n\n is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure)\n\n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n 
\tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n \"reviewPaths\": [\"kernelVersion\"],\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\n# General Kernel versions are between 5.1.1 and 5.16.2\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == false\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 16\n parsed_kernel_version_arr[2] < 2\n}\n\n# Azure kernel version with is 5.4.0-1067-azure\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == true\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 4\n parsed_kernel_version_arr[2] == 0\n parsed_kernel_version_arr[3] < 1067\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}\n\nparse_kernel_version_to_array(kernel_version_str) = output {\n\tversion_triplet := regex.find_n(`(\\d+\\.\\d+\\.\\d+)`, kernel_version_str,-1)\n version_triplet_array := split(version_triplet[0],\".\")\n\n build_vendor := regex.find_n(`-(\\d+)-(\\w+)`, kernel_version_str,-1)\n build_vendor_array := split(build_vendor[0],\"-\")\n\n output := [to_number(version_triplet_array[0]),to_number(version_triplet_array[1]),to_number(version_triplet_array[2]),to_number(build_vendor_array[1]),build_vendor_array[2]]\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" + }, + { + "name": "system-authenticated-allowed-to-take-over-cluster", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is gourp\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := 
subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}" + }, + { + "name": "pod-security-admission-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "list-all-namespaces", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "exposed-sensitive-interfaces-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.sensitiveInterfaces" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveInterfaces", + "name": "Sensitive interfaces", + "description": "List of known software interfaces that should not generally be exposed to the Internet." 
+ } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# 
====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" + }, + { + "name": "rbac-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + 
"apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot 
\"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # 
check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "persistentvolumeclaim-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PersistentVolumeClaim" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "rule-hostile-multitenant-workloads", + "attributes": { + "actionRequired": "manual review" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "ruleDependencies": [], + "configInputs": [], + "controlConfigInputs": [], + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", + "remediation": "Use physically isolated clusters", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not always authorize all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "pv-without-encryption", + "attributes": { + "useFromKubescapeVersion": "v3.0.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PersistentVolume" + ] + }, + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "StorageClass" + ] + } + ], + "description": "PersistentVolume without encryption", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n\tpv := input[_]\n\tpv.kind == \"PersistentVolume\"\n\n\t# Find the related storage class\n\tstorageclass := input[_]\n\tstorageclass.kind == \"StorageClass\"\n\tpv.spec.storageClassName == storageclass.metadata.name\n\n\t# Check if storage class is encrypted\n\tnot is_storage_class_encrypted(storageclass)\n\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Volume '%v' has is using a storage class that does not use encryption\", [pv.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": \"pv.spec.storageClassName\",\n\t\t\t\"value\": \"\"\n }],\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pv]}\n\t}\n}\n\n# Storage class is encrypted - AWS\nis_storage_class_encrypted(storageclass) {\n\tstorageclass.parameters.encrypted == \"true\"\n}\n\n# Storage class is encrypted - Azure\nis_storage_class_encrypted(storageclass) {\n\tstorageclass.provisioner\n\tcontains(storageclass.provisioner,\"azure\")\n}\n\n# Storage class is encrypted - GCP\nis_storage_class_encrypted(storageclass) {\n\t# GKE encryption is enabled by default https://cloud.google.com/blog/products/containers-kubernetes/exploring-container-security-use-your-own-keys-to-protect-your-data-on-gke\n\tstorageclass.provisioner\n\tcontains(storageclass.provisioner,\"csi.storage.gke.io\")\n}\n\n" + }, + { + "name": "k8s-common-labels-usage", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.k8sRecommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.k8sRecommendedLabels", + "name": "Kubernetes Recommended Labels", + "description": "Kubescape checks that workloads have at least one of this list of configurable labels, as recommended in the Kubernetes documentation." 
+ } + ], + "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": 
\"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" + }, + { + "name": "rolebinding-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "csistoragecapacity-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "list-all-mutating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Validate admission controller" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + 
"admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns mutating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" + }, + { + "name": "workload-mounted-pvc", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts PVC", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) 
:= result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous 
requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "serviceaccount-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-network-policy-is-enabled-eks", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" + }, + { + "name": "psp-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "resources-memory-limit-and-request", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.memory_request_max", + "settings.postureControlInputs.memory_request_min", + "settings.postureControlInputs.memory_limit_max", + "settings.postureControlInputs.memory_limit_min" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.memory_request_max", + "name": "memory_request_max", + "description": "Ensure a memory resource request is set and is under this defined maximum value." + }, + { + "path": "settings.postureControlInputs.memory_request_min", + "name": "memory_request_min", + "description": "Ensure a memory resource request is set and is above this defined minimum value." + }, + { + "path": "settings.postureControlInputs.memory_limit_max", + "name": "memory_limit_max", + "description": "Ensure a memory resource limit is set and is under this defined maximum value." + }, + { + "path": "settings.postureControlInputs.memory_limit_min", + "name": "memory_limit_min", + "description": "Ensure a memory resource limit is set and is under this defined maximum value." 
+ } + ], + "description": "memory limits and requests are not set.", + "remediation": "Ensure memory limits and requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# ================================== no memory requests ==================================\n# Fails if pod does not have container with memory requests\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot 
container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n\n# ============================================= memory requests exceed min/max =============================================\n\n# Fails if pod exceeds memory request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := \"resources.requests.memory\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := \"resources.requests.memory\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := \"resources.requests.memory\" \n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# ============================================= memory limits exceed min/max =============================================\n\n# Fails if pod exceeds memory-limit \ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit \", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit \ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n######################################################################################################\n\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max := data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := 
data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tto_number(given) < to_number(min)\n\n}\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" + }, + { + "name": "psp-deny-allowed-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] 
+ } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + }, + { + "name": "configured-readiness-probe", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Readiness probe is not configured", + "remediation": "Ensure Readiness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": 
[fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "alert-container-optimized-os-not-in-use", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". \n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. \n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "lease-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that 
allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, 
format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "resources-memory-requests", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory requests are not set.", + "remediation": "Ensure memory requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory requests ==================================\n# Fails if pod does not have container with memory requests\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "remediation": "Follow the 
Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Always pull images.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n 
\n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "remediation": "Edit the Controller Manager pod 
specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy default, `--root-ca-file` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, 
pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + }, + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := 
rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verify kubelet's certificate before establishing connection.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ingress-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "serviceaccount-token-mount", + 
"attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, start_of_path, [])\n\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata , wl.metadata)\n has_service_account_binding(sa)\n result := is_sa_auto_mounted_and_bound(spec, start_of_path, sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"reviewPaths\": failed_path,\n \"failedPaths\": failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n paths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == 
metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" + }, + { + "name": "rule-deny-cronjobs", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob" + }, + "ruleLanguage": "rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if it's cronjob", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# alert cronjobs\n\n# handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" + }, + { + "name": "validate-kubelet-tls-configuration-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + 
"match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletConfiguration", + "KubeletCommandLine" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", + "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t# get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = 
result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "pod-security-admission-baseline-applied-2", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant 
labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" + }, + { + "name": "ensure_network_policy_configured_in_labels", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "description": "fails if no networkpolicy configured in workload labels", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# 
connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "ensure-image-scanning-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "DescribeRepositories" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}" + }, + { + "name": "automount-default-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", 
+ "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + }, + { + "name": "ingress-and-egress-blocked", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, 
networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga 
:= {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" + }, + { + "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + 
} + ], + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"spec.tls\"],\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n 
\t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n" + }, + { + "name": "rule-can-delete-k8s-events-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "endpointslice-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "discovery.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "EndpointSlice" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' 
namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "insecure-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." + } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + }, + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "list-all-validating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Validate admission controller" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns validating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": 
"armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" + }, + { + "name": "etcd-client-auth-cert", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Enable client authentication on etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": 
"Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "configured-liveness-probe", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Liveness probe is not configured", + "remediation": "Ensure Liveness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + 
"Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Encrypt etcd key-value store.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", + "attributes": { + 
"hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "rule-access-dashboard-wl-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
[],\n\t\t\"deletePaths\": [\"spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + }, + { + "name": "label-usage-for-resources", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.recommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.recommendedLabels", + "name": "Recommended Labels", + "description": "Kubescape checks that workloads have at least one label that identifies semantic attributes." + } + ], + "description": "check if a certain set of labels is defined, this is a configurable control. 
Initial list: app, tier, phase, version, owner, env.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# 
get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" + }, + { + "name": "rule-can-portforward-v1", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "naked-pods", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", + "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. 
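The label-usage-for-resources rule above picks the first configured recommended label, falls back to a placeholder when none is configured, and writes the fix path with the key in square brackets. A standalone sketch with the label list inlined instead of read from data.postureControlInputs.recommendedLabels:

```
package example

# Inlined stand-in for the configured recommended-label list.
recommended_labels := ["app", "tier", "owner"]

# First configured label, falling back to a placeholder when the list is empty.
label_key(unused_param) = key {
	count(recommended_labels) > 0
	key := recommended_labels[0]
} else = "YOUR_LABEL"

# Bracket notation keeps label keys containing dots or slashes as one path segment.
label_fix_path(prefix) = fix {
	fix := {"path": sprintf("%vmetadata.labels[%v]", [prefix, label_key("")]), "value": "YOUR_VALUE"}
}

# Example: data.example.pod_fix == {"path": "metadata.labels[app]", "value": "YOUR_VALUE"}
pod_fix := label_fix_path("")
```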
Example command: kubectl create deployment nginx-depl --image=nginx:1.19", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" + }, + { + "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" + }, + { + "name": "psp-deny-hostpid", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set 
to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "psp-deny-allowprivilegeescalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := 
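The two PSP rules above use the every quantifier so that a resource is only reported when all PodSecurityPolicies in the input set the risky field; a single compliant PSP silences the whole check. A reduced sketch of that pattern for hostPID, where `all_host_pid` and `offending_names` are illustrative names:

```
package example

import future.keywords.every

# Only report PSPs when every PSP in the list enables hostPID.
all_host_pid(psps) {
	every psp in psps {
		psp.spec.hostPID == true
	}
}

offending_names(psps) = names {
	all_host_pid(psps)
	names := [psp.metadata.name | psp := psps[_]]
}

# Hypothetical input: both PSPs enable hostPID, so both are reported.
example := offending_names([
	{"metadata": {"name": "privileged"}, "spec": {"hostPID": true}},
	{"metadata": {"name": "legacy"}, "spec": {"hostPID": true}}
])
```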
input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "CVE-2022-39328", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := 
to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + }, + { + "name": "rbac-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Activate garbage collector on pod termination, as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = 
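The CVE-2022-39328 rule above parses the image tag into a major.minor.patch triplet and flags Grafana 9.2.x below 9.2.4. A self-contained sketch of that parsing against hypothetical image references:

```
package example

# Parse "major.minor.patch" out of an image tag and flag Grafana 9.2.x below 9.2.4.
vulnerable_to_cve_2022_39328(image) {
	parts := split(replace(image, "-ubuntu", ""), ":")
	triplet := split(parts[1], ".")
	count(triplet) == 3
	to_number(triplet[0]) == 9
	to_number(triplet[1]) == 2
	to_number(triplet[2]) < 4
}

# Hypothetical image references: 9.2.3 is flagged, 9.2.4 is not.
flagged { vulnerable_to_cve_2022_39328("grafana/grafana:9.2.3") }
not_flagged { not vulnerable_to_cve_2022_39328("grafana/grafana:9.2.4") }
```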
{\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) 
{\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "access-container-service-account-v1", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + 
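The exec-into-container check above works by building, for each RBAC rule attached to a subject, one comprehension per dimension (verbs, apiGroups, resources) and requiring all three to be non-empty. Below is a minimal, self-contained sketch of that matching pattern; the package name and the sample rule object are hypothetical and only illustrate the technique, they are not part of the library.

```
package example

import future.keywords.in

# Made-up RBAC rule granting "create" on pods/exec (illustrative input only).
sample_rule := {
	"verbs": ["create"],
	"apiGroups": [""],
	"resources": ["pods/exec"],
}

# A rule matches when at least one verb, one apiGroup and one resource
# fall into the watched lists - the same three-comprehension test as above.
rule_grants_exec(rule) {
	matched_verbs := [v | v := rule.verbs[_]; v in ["create", "*"]]
	count(matched_verbs) > 0

	matched_groups := [g | g := rule.apiGroups[_]; g in ["", "*"]]
	count(matched_groups) > 0

	matched_resources := [r | r := rule.resources[_]; r in ["pods/exec", "pods/*", "*"]]
	count(matched_resources) > 0
}

default grants_exec = false

grants_exec {
	rule_grants_exec(sample_rule)
}
```

Evaluating `data.example.grants_exec` with `opa eval` returns true for the sample rule; dropping `pods/exec` from its resources makes it false.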
"description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" + }, + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": 
[result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" + }, + { + "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", + "attributes": { + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "read-only-port-enabled-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": 
obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + }, + 
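The read-only-port check above has two branches: one inspects the kubelet command line for `--read-only-port`, the other base64-decodes the kubelet config file delivered by the host sensor, runs `yaml.unmarshal` on it and fails when `readOnlyPort` is set to anything other than 0. A small self-contained sketch of the config-file branch, with a hypothetical package name and an inlined config fragment standing in for host-sensor data:

```
package example

# Kubelet config fragment as the host sensor would deliver it: base64-encoded YAML.
# readOnlyPort: 10255 means the unauthenticated read-only port is open.
encoded_config := base64.encode("readOnlyPort: 10255\nauthentication:\n  anonymous:\n    enabled: false\n")

read_only_port_open {
	cfg := yaml.unmarshal(base64.decode(encoded_config))
	cfg.readOnlyPort
	cfg.readOnlyPort != 0
}
```

With `readOnlyPort: 0` (or the key absent) `read_only_port_open` is undefined, which is exactly the condition under which the library rule stays silent.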
{ + "name": "has-image-signature", + "attributes": { + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensures that all images contain some signature", + "remediation": "Replace the image with a signed image", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + }, + { + "name": "audit-policy-content", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n# rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit 
levels\n\tvalid_rules := [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}" + }, + { + "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "pod-security-admission-baseline-applied-1", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "insecure-port-flag", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + 
"rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + }, + { + "name": "rule-identify-old-k8s-registry", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Identifying if pod container images are from deprecated K8s registry", + "remediation": "Use images new registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": 
[path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\n" + }, + { + "name": "ensure-endpointprivateaccess-is-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" + }, + { + "name": "ingress-no-tls", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "Ingress should not be configured without TLS", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\n\t# Check if ingress has TLS enabled\n\tnot ingress.spec.tls\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Ingress '%v' has not TLS definition\", [ingress.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n \"path\": \"spec.tls\",\n \"value\": \"\"\n }],\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"k8sApiObjects\": [ingress]}\n\t}\n}\n" + }, + { + "name": "resource-policies", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + 
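The identify-old-k8s-registry check above reduces to a prefix test on each container image, and the ingress-no-tls check to the absence of `spec.tls`. A tiny sketch of the registry test against an inline container list; the package name and image references are examples only:

```
package example

containers := [
	{"name": "dns", "image": "k8s.gcr.io/coredns/coredns:v1.9.3"},   # deprecated registry
	{"name": "app", "image": "registry.k8s.io/pause:3.9"},           # current registry
]

deprecated_images[img] {
	img := containers[_].image
	startswith(img, "k8s.gcr.io/")
}
```

Only the first image lands in `deprecated_images`, mirroring the `deprecated_registry(image)` helper used by the rule.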
"resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace has no resource policies defined", + "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tstart_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tstart_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot 
container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" + }, + { + "name": "excessive_amount_of_vulnerabilities_pods", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed critical vulnerable pods", + "useFromKubescapeVersion": "v1.0.133", + "imageScanRelated": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "armo.vuln.images", + "image.vulnscan.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ImageVulnerabilities" + ] + } + ], + "configInputs": [ + "settings.postureControlInputs.max_critical_vulnerabilities", + "settings.postureControlInputs.max_high_vulnerabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.max_critical_vulnerabilities", + "name": "Max Critical vulnerabilities", + "description": "The maximum number of Critical severity vulnerabilities permitted." + }, + { + "path": "settings.postureControlInputs.max_high_vulnerabilities", + "name": "Max High vulnerabilities", + "description": "The maximum number of High severity vulnerabilities permitted." 
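The resource-policies rule above reports missing CPU/memory limits through `fixPaths` rather than `failedPaths`, since a missing limit is something a fix can add. A condensed sketch of its "no limits at all" branch with a hypothetical package name and an inline container; the path prefix and index are illustrative:

```
package example

sample_container := {"name": "api", "resources": {}}

# No limits at all: propose both the cpu and the memory limit paths.
missing_limits_fix(container, start_of_path, i) = fix {
	not container.resources.limits
	fix := [
		{"path": sprintf("%vcontainers[%v].resources.limits.cpu", [start_of_path, i]), "value": "YOUR_VALUE"},
		{"path": sprintf("%vcontainers[%v].resources.limits.memory", [start_of_path, i]), "value": "YOUR_VALUE"},
	]
}

fix_paths := missing_limits_fix(sample_container, "spec.template.spec.", 0)
```

For the sample container this yields two fix paths under `spec.template.spec.containers[0].resources.limits`, matching the structure the full rule emits for workloads.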
+ } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # Has ^ amount of vulnerabilities\n check_num_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \"reviewPaths\": [path],\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"Critical\" ])\n\n str_max := data.postureControlInputs.max_critical_vulnerabilities[_]\n exists > to_number(str_max)\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"High\" ])\n\n str_max := data.postureControlInputs.max_high_vulnerabilities[_]\n exists > to_number(str_max)\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n \n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}" + }, + { + "name": "kubelet-rotate-certificates", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --rotate-certificates argument is not set to false.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"rotateCertificates\"],\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "rule-can-update-configmap-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can update/patch the 'coredns' configmap", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := 
rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", + 
"attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Turn on Role Based Access Control.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", 
[rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "resources-secret-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "kubelet-ip-tables", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "rule": 
"package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "etcd-tls-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Configure TLS encryption for the etcd service.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default Value\nBy default, TLS encryption is not set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": 
result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Retain 10 or an appropriate number of old log files.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := 
json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", + "attributes": { + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "etcd-peer-auto-tls-disabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. 
Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "containers-mounting-docker-socket", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", 
[format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" 
+ ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "etcd-peer-client-auth-cert", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "etcd should be configured for peer authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "resources-cpu-requests", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU requests are not set.", + "remediation": "Ensure CPU requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ==================================== no CPU requests =============================================\n# Fails if pod does not have container with CPU request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU requests\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU requests\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "anonymous-access-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n" + }, + { + "name": "cluster-admin-role", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal 
ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "ensure-service-principle-has-read-only-permissions", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" + }, + { + "name": "workload-mounted-configmap", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts ConfigMaps", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, 
resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Automate service accounts management.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Validate service account before validating token.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == 
\"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-can-bind-escalate", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can or bind escalate roles/clusterroles", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": 
finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n 
no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := 
input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := 
input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "poddisruptionbudget-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + } + ], + "ruleDependencies": [], + "description": "", + 
"remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "alert-fargate-not-in-use", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in cluster.\n# a Node is identified as using fargate if it's name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if count of all nodes equals to count of nodes_not_fargate it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}" + }, + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t 
\"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", 
[start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "set-seccomp-profile-RuntimeDefault", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile as RuntimeDefault", + "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", 
\"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != \"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, 
\"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n" + }, + { + "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "endpoints-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Endpoints" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, 
resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "CVE-2022-24348", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + }, + { + "name": "psp-deny-hostnetwork", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input 
{\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "role-in-default-namespace", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not use token based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server TLS is not configured\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "set-fsgroup-value", + "attributes": {}, + "ruleLanguage": 
"Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" + }, + { + "name": "rule-can-create-pod", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + 
"description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native-cis", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "list-role-definitions-in-acr", + "attributes": {}, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" + } +] \ No newline at end of file diff --git a/releaseDev/security.json b/releaseDev/security.json new file mode 100644 index 000000000..66a74c4a5 --- /dev/null +++ b/releaseDev/security.json @@ -0,0 +1,3407 @@ +{ + "name": "security", + "description": "Controls that are used to assess security threats.", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "security" + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "version": null, + "controls": [ + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. 
It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "insecure-port-flag", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. 
Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + } + ] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = 
[]\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", 
[container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + } + ] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, 
metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource) can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply the least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in highly privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high-privilege roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := 
array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s has high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove the hostPID and hostIPC privileges from the yaml file(s) unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": 
[path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := 
wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := 
container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n}" + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." 
+ } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = 
sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. 
If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": 
obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "containers-mounting-docker-socket", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Check hostpath. 
If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n" + } + ] + }, + { + "name": "Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields are set according to the recommendations in the CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments are defined as privileged", + "remediation": "avoid defining pods as privileged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", 
[wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": 
sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + }, + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := 
evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": 
failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + 
], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-seccomp-profile", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container level.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", 
[concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-procmount-default", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := 
input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" + }, + { + "name": "set-fsgroup-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has 
fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "set-sysctls-params", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.sysctls is not set.", + "remediation": "Set securityContext.sysctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": 
\"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" + }, + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + 
"resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} 
\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Workload with secret access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Secret Access" + ] + } + ] + }, + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. 
Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", + "controlID": "C-0255", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "workload-mounted-secrets", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Secret" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == 
\"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Exposure to Internet", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-database-without-authentication", + "categories": [ + "Initial Access" + ] + } + ] + }, + "description": "This control detects workloads that are exposed to the Internet through a Service (NodePort or LoadBalancer) or an Ingress. It fails when it finds workloads connected to these resources.", + "remediation": "Evaluate the exposed resources and apply relevant changes wherever needed.", + "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", + "controlID": "C-0256", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "exposure-to-internet", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "fails if the running workload has a bound Service or Ingress that exposes it to the Internet.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n\t\t \"reviewPaths\": failPath,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n\n # Make sure that they belong to the same namespace\n svc.metadata.namespace == 
ingress.metadata.namespace\n\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t\t{\n\t \"object\": ingress,\n\t\t \"reviewPaths\": result,\n\t \"failedPaths\": result,\n\t },\n\t\t{\n\t \"object\": svc,\n\t\t}\n ]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" + } + ] + }, + { + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. 
Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", + "controlID": "C-0257", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "rules": [ + { + "name": "workload-mounted-pvc", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts PVC", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# 
get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Workload with configMap access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", + "controlID": "C-0258", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "workload-mounted-configmap", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts ConfigMaps", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := 
result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Workload with credential access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", + "controlID": "C-0259", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + 
"description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value 
!= \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := 
[sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure_network_policy_configured_in_labels", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "description": "fails if no networkpolicy configured in workload labels", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the 
networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "ServiceAccount token mounted", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. 
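(Editorial illustration, not part of the framework JSON.) The remediation for the "ServiceAccount token mounted" control above suggests setting `automountServiceAccountToken: false`; a minimal sketch of what that could look like at both the ServiceAccount and the workload level, assuming hypothetical names and a placeholder image:

```yaml
# Sketch only: disable token auto-mount by default, opt in per workload that needs it.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                          # hypothetical ServiceAccount name
automountServiceAccountToken: false
---
apiVersion: v1
kind: Pod
metadata:
  name: app                             # hypothetical Pod name
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false   # explicit opt-out in the pod spec as well
  containers:
    - name: app
      image: registry.example.com/app:1.0.0   # placeholder image
```

The pod-level field takes precedence over the ServiceAccount setting, so a workload that genuinely needs the token can set it to `true` without changing the ServiceAccount.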
Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", + "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", + "controlID": "C-0261", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "serviceaccount-token-mount", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, start_of_path, [])\n\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata , wl.metadata)\n has_service_account_binding(sa)\n result := is_sa_auto_mounted_and_bound(spec, start_of_path, sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"reviewPaths\": failed_path,\n \"failedPaths\": failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n paths[0] != \"\"\n} else 
= []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" + } + ] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. 
Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "anonymous-access-enabled", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n" + } + ] + }, + { + "controlID": "C-0265", + "name": "Authenticated user has sensitive permissions", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
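(Editorial illustration only.) The `anonymous-access-enabled` rule above reports any RoleBinding or ClusterRoleBinding whose subject is `system:anonymous` or `system:unauthenticated`; C-0265 below applies the same concern to the `system:authenticated` group. A hypothetical binding of the kind the rule would flag, with made-up names:

```yaml
# Hypothetical example of a binding C-0262 would report: it grants a role
# to unauthenticated users through the system:unauthenticated group.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: anonymous-read                  # hypothetical name
subjects:
  - kind: Group
    name: system:unauthenticated
    apiGroup: rbac.authorization.k8s.io
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
```

Removing the offending subject entry (the rule's deletePaths points at `subjects[i]`) or deleting the binding entirely resolves the finding.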
This control ensures that system:authenticated users do not have cluster risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "system-authenticated-allowed-to-take-over-cluster", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is gourp\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}" + } + ] + }, + { + "name": "Workload with cluster takeover roles", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Cluster Access" + ], + "displayRelatedResources": true, + 
"clickableResourceKind": "ServiceAccount" + } + ] + }, + "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", + "remediation": "You should apply least privilege principle. Make sure each service account has only the permissions that are absolutely necessary.", + "long_description": "In Kubernetes, workloads with overly permissive roles pose a significant security risk. When a workload is granted roles that exceed the necessities of its operation, it creates an attack surface for privilege escalation within the cluster. This is especially critical if the roles include permissions for creating, updating, or accessing sensitive resources or secrets. An attacker exploiting such a workload can leverage these excessive privileges to perform unauthorized actions, potentially leading to a full cluster takeover. Ensuring that each service account associated with a workload is limited to permissions that are strictly necessary for its function is crucial in mitigating the risk of cluster takeovers.", + "test": "Check if the service account used by a workload has cluster takeover roles.", + "controlID": "C-0267", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "workload-with-cluster-takeover-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has cluster takeover roles\n role := input[_]\n role.kind in [\"Role\", \"ClusterRole\"]\n is_takeover_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has cluster takeover roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n 
\"object\": rolebinding,\n\t\t \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\n# look for rule allowing create/update workloads\nis_takeover_role(role){\n takeover_resources := [\"pods\", \"*\"]\n takeover_verbs := [\"create\", \"update\", \"patch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}\n\n# look for rule allowing secret access\nis_takeover_role(role){\n rule := role.rules[i]\n takeover_resources := [\"secrets\", \"*\"]\n takeover_verbs := [\"get\", \"list\", \"watch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := 
[\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" + } + ] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-cpu-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := 
{\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" + } + ] + }, + { + "name": "Workload with administrative roles", + "attributes": {}, + "description": "This control identifies workloads where the associated service accounts have roles that grant administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", + "long_description": "In Kubernetes environments, workloads granted administrative-level privileges without restrictions represent a critical security vulnerability. When a service account associated with a workload is configured with permissions to perform any action on any resource, it essentially holds unrestricted access within the cluster, akin to cluster admin privileges. This configuration dramatically increases the risk of security breaches, including data theft, unauthorized modifications, and potentially full cluster takeovers. Such privileges allow attackers to exploit the workload for wide-ranging malicious activities, bypassing the principle of least privilege. 
Therefore, it's essential to follow the least privilege principle and make sure cluster admin permissions are granted only when it is absolutely necessary.", + "test": "Check if the service account used by a workload has cluster admin roles, either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges.", + "controlID": "C-0272", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "workload-with-administrative-roles", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has administrative roles\n role := input[_]\n role.kind in [\"Role\", \"ClusterRole\"]\n is_administrative_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has administrative roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n \"object\": rolebinding,\n\t\t \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n 
wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\nis_administrative_role(role){\n administrative_resources := [\"*\"]\n administrative_verbs := [\"*\"]\n administrative_api_groups := [\"\", \"*\"]\n \n administrative_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in administrative_resources ; \n rule.verbs[b] in administrative_verbs ; \n rule.apiGroups[c] in administrative_api_groups]\n count(administrative_rule) > 0\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" + } + ] + }, + { + "name": "Outdated Kubernetes version", + "attributes": {}, + "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", + "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", + "long_description": "Running an outdated version of Kubernetes poses significant security risks and operational challenges. Older versions may contain unpatched vulnerabilities, leading to potential security breaches and unauthorized access. Additionally, outdated clusters might not support newer, more secure, and efficient features, impacting both performance and security. 
Regularly updating Kubernetes ensures compliance with the latest security standards and access to enhanced functionalities.", + "test": "Verifies the current Kubernetes version against the latest stable releases.", + "controlID": "C-0273", + "baseScore": 2.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "outdated-k8s-version", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\tnode := input[_]\n\tnode.kind == \"Node\"\n\tcurrent_version := node.status.nodeInfo.kubeletVersion\n has_outdated_version(current_version)\n\tpath := \"status.nodeInfo.kubeletVersion\"\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Your kubelet version: %s, in node: %s is outdated\", [current_version, node.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [node]},\n\t}\n}\n\n\nhas_outdated_version(version) {\n\t# the `supported_k8s_versions` is validated in the validations script against \"https://api.github.com/repos/kubernetes/kubernetes/releases\"\n supported_k8s_versions := [\"v1.29\", \"v1.28\", \"v1.27\"] \n\tevery v in supported_k8s_versions{\n\t\tnot startswith(version, v)\n\t}\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0005", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0057", + "C-0066", + "C-0069", + "C-0070", + "C-0074", + "C-0211", + "C-0255", + "C-0256", + "C-0257", + "C-0258", + "C-0259", + "C-0260", + "C-0261", + "C-0262", + "C-0265", + "C-0267", + "C-0270", + "C-0271", + "C-0272", + "C-0273" + ] +} \ No newline at end of file diff --git a/releaseDev/security_frameworks.json b/releaseDev/security_frameworks.json new file mode 100644 index 000000000..639ce4869 --- /dev/null +++ b/releaseDev/security_frameworks.json @@ -0,0 +1,2569 @@ +[ + { + "name": "WorkloadScan", + "description": "Framework for scanning a workload", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "security" + ], + "version": null, + "controls": [ + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. 
User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0236", + "name": "Verify image signature", + "description": "Verifies the signature of each image with given public keys", + "long_description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "manual_test": "", + "references": [], + "attributes": { + "actionRequired": "configuration" + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0237", + "name": "Check if signature exists", + "description": "Ensures that all images contain some signature", + "long_description": "Verifies that each image is signed", + "remediation": "Replace the image with a signed image", + "manual_test": "", + "references": [], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", + "controlID": "C-0257", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "rules": [] + }, + { + "name": "Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. 
Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0078", + "C-0236", + "C-0237", + "C-0045", + "C-0048", + "C-0257", + "C-0207", + "C-0034", + "C-0012", + "C-0041", + "C-0260", + "C-0044", + "C-0038", + "C-0046", + "C-0013", + "C-0016", + "C-0017", + "C-0055", + "C-0057", + "C-0270", + "C-0271" + ] + }, + { + "name": "security", + "description": "Controls that are used to assess security threats.", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "security" + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "version": null, + "controls": [ + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. 
If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. 
This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. 
If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with secret access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Secret Access" + ] + } + ] + }, + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", + "controlID": "C-0255", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exposure to Internet", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-database-without-authentication", + "categories": [ + "Initial Access" + ] + } + ] + }, + "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", + "controlID": "C-0256", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "description": "This control detects workloads that have mounted PVC. 
Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", + "controlID": "C-0257", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "rules": [] + }, + { + "name": "Workload with configMap access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", + "controlID": "C-0258", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with credential access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", + "controlID": "C-0259", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "This control detects workloads that has no NetworkPolicy configured in labels. 
If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "ServiceAccount token mounted", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", + "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", + "controlID": "C-0261", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0265", + "name": "Authenticated user has sensitive permissions", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
This control ensures that system:authenticated users do not have cluster risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workload with cluster takeover roles", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Cluster Access" + ], + "displayRelatedResources": true, + "clickableResourceKind": "ServiceAccount" + } + ] + }, + "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", + "remediation": "You should apply least privilege principle. Make sure each service account has only the permissions that are absolutely necessary.", + "long_description": "In Kubernetes, workloads with overly permissive roles pose a significant security risk. When a workload is granted roles that exceed the necessities of its operation, it creates an attack surface for privilege escalation within the cluster. This is especially critical if the roles include permissions for creating, updating, or accessing sensitive resources or secrets. An attacker exploiting such a workload can leverage these excessive privileges to perform unauthorized actions, potentially leading to a full cluster takeover. 
Ensuring that each service account associated with a workload is limited to permissions that are strictly necessary for its function is crucial in mitigating the risk of cluster takeovers.", + "test": "Check if the service account used by a workload has cluster takeover roles.", + "controlID": "C-0267", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with administrative roles", + "attributes": {}, + "description": "This control identifies workloads where the associated service accounts have roles that grant administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", + "long_description": "In Kubernetes environments, workloads granted administrative-level privileges without restrictions represent a critical security vulnerability. When a service account associated with a workload is configured with permissions to perform any action on any resource, it essentially holds unrestricted access within the cluster, akin to cluster admin privileges. This configuration dramatically increases the risk of security breaches, including data theft, unauthorized modifications, and potentially full cluster takeovers. Such privileges allow attackers to exploit the workload for wide-ranging malicious activities, bypassing the principle of least privilege. 
Therefore, it's essential to follow the least privilege principle and make sure cluster admin permissions are granted only when it is absolutely necessary.", + "test": "Check if the service account used by a workload has cluster admin roles, either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges.", + "controlID": "C-0272", + "baseScore": 6.0, + "category": { + "name": "Workload", + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Outdated Kubernetes version", + "attributes": {}, + "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", + "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", + "long_description": "Running an outdated version of Kubernetes poses significant security risks and operational challenges. Older versions may contain unpatched vulnerabilities, leading to potential security breaches and unauthorized access. Additionally, outdated clusters might not support newer, more secure, and efficient features, impacting both performance and security. Regularly updating Kubernetes ensures compliance with the latest security standards and access to enhanced functionalities.", + "test": "Verifies the current Kubernetes version against the latest stable releases.", + "controlID": "C-0273", + "baseScore": 2.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0005", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0057", + "C-0066", + "C-0069", + "C-0070", + "C-0074", + "C-0211", + "C-0255", + "C-0256", + "C-0257", + "C-0258", + "C-0259", + "C-0260", + "C-0261", + "C-0262", + "C-0265", + "C-0267", + "C-0270", + "C-0271", + "C-0272", + "C-0273" + ] + }, + { + "name": "ClusterScan", + "description": "Framework for scanning a cluster", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "security" + ], + "version": null, + "controls": [ + { + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "RBAC enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "RBAC is the most advanced and well-accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "RBAC is the most advanced and well-accepted mode of authorizing users of the Kubernetes API", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "The Kubernetes control plane API is running with a non-secure port enabled, which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend blocking them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0265", + "name": "Authenticated user has sensitive permissions", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have cluster risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", + "attributes": {}, + "baseScore": 7, + "category": { + "name": "Control plane", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users who have get/list/watch RBAC permissions on secrets. 
", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Roles with delete capabilities", + "attributes": { + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. 
It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repetitive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repetitive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" + ], + "attributes": {}, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible, replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespace in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" + ], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "This control detects workloads that have no NetworkPolicy configured in labels. 
If a network policy is not configured, it means that your applications might not have the necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "test": "Check that all workloads have a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exposure to internet", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-database-without-authentication", + "categories": [ + "Initial Access" + ] + } + ] + }, + "description": "This control detects workloads that are exposed to the Internet through a Service (NodePort or LoadBalancer) or Ingress. It fails if it finds workloads connected to these resources.", + "remediation": "The user can evaluate their exposed resources and apply relevant changes wherever needed.", + "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", + "controlID": "C-0256", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC privileges from the yaml file(s) unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in the AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0066", + "C-0088", + "C-0067", + "C-0005", + "C-0262", + "C-0265", + "C-0015", + "C-0002", + "C-0007", + "C-0063", + "C-0036", + "C-0039", + "C-0035", + "C-0188", + "C-0187", + "C-0012", + "C-0260", + "C-0256", + "C-0038", + "C-0041", + "C-0048", + "C-0057", + "C-0013" + ] + } +] \ No newline at end of file diff --git a/releaseDev/soc2.json b/releaseDev/soc2.json new file mode 100644 index 000000000..2ef630fe9 --- /dev/null +++ b/releaseDev/soc2.json @@ -0,0 +1,537 @@ +{ + "name": "SOC2", + "description": "SOC2 compliance related controls", + "attributes": { + "armoBuiltin": true + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Firewall (CC6.1,CC6.6,CC7.2)", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management.", + "remediation": "Define network policies for all workloads to protect unwanted access", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure_network_policy_configured_in_labels", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "description": "fails if no networkpolicy configured in workload labels", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# 
connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ], + "long_description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management." + }, + { + "name": "Cryptographic key management - misplaced secrets (CC6.1,CC6.6,CC6.7)", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "Cryptographic key management - minimize access to secrets (CC6.1,CC6.6,CC6.7)", + "controlID": "C-0186", + "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. 
Access to encryption keys are restricted to authorized personnel.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" + ], + "attributes": {}, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Access restriction to infrastructure - admin access (CC6.1 ,CC6.2, CC6.7, CC6.8)", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", + "controlID": "C-0035", + "baseScore": 6.0, + "category": { + "name": "Access control", + "id": "Cat-2" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Event logging (CC6.8,CC7.1,CC7.2)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers.", + "remediation": "Turn on audit logging for your cluster. 
Look at the vendor guidelines for more details", + "long_description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers: - Logon attempts - Data deletions - Application and system errors - Changes to software and configuration settings - Changes to system files, configuration files or content files The logs are monitored by IT Operations staff and significant issues are investigated and resolved within a timely manner.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "category": { + "name": "Control plane", + "id": "Cat-1" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Data in motion encryption - Ingress is TLS encrypted (CC6.1,CC6.6,CC6.7)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", + "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", + "test": "Check if the Ingress resource has TLS configured", + "controlID": "C-0263", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ingress-no-tls", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "Ingress should not be configured without TLS", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\n\t# Check if ingress has TLS enabled\n\tnot ingress.spec.tls\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Ingress '%v' has not TLS definition\", [ingress.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n \"path\": \"spec.tls\",\n \"value\": \"\"\n }],\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"k8sApiObjects\": [ingress]}\n\t}\n}\n" + } + ], + "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." 
+ } + ], + "ControlsIDs": [ + "C-0260", + "C-0012", + "C-0186", + "C-0035", + "C-0067", + "C-0263" + ] +} \ No newline at end of file diff --git a/releaseDev/workloadscan.json b/releaseDev/workloadscan.json new file mode 100644 index 000000000..f59cb32e2 --- /dev/null +++ b/releaseDev/workloadscan.json @@ -0,0 +1,2021 @@ +{ + "name": "WorkloadScan", + "description": "Framework for scanning a workload", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "security" + ], + "version": null, + "controls": [ + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + }, + { + "name": "container-image-repository-v1", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", 
+ "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." + } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" + } + ] + }, + { + "controlID": "C-0236", + "name": "Verify image signature", + "description": "Verifies the signature of each image with given public keys", + "long_description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "manual_test": "", + "references": [], + "attributes": { + "actionRequired": "configuration" + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "verify-image-signature", + "attributes": { + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + 
"batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "ruleQuery": "armo_builtins", + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.trustedCosignPublicKeys", + "name": "Trusted Cosign public keys", + "description": "A list of trusted Cosign public keys that are used for validating container image signatures." + } + ], + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.containers[%v].image\", [i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0237", + "name": "Check if signature exists", + "description": "Ensures that all images contain some signature", + "long_description": "Verifies that each image is signed", + "remediation": "Replace the image with a signed image", + "manual_test": "", + "references": [], + "attributes": {}, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Workload", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "has-image-signature", + "attributes": { + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + 
"apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensures that all images contain some signature", + "remediation": "Replace the image with a signed image", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + } + ] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := 
container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n}" + } + ] + }, + { + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = 
sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" + } + ] + }, + { + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", + "controlID": "C-0257", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + }, + "id": "Cat-5" + }, + "rules": [ + { + "name": "workload-mounted-pvc", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts PVC", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result 
{\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" + ], + "attributes": {}, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "category": { + "name": "Workload", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-secrets-in-env-var", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from 
environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, 
metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "category": { + "name": "Secrets", + "id": "Cat-3" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed", + "settings.postureControlInputs.sensitiveKeyNamesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those pods that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Network", + "id": "Cat-4" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "description": "This control detects workloads that has no NetworkPolicy configured in labels. 
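As a hedged sketch of one way to cover a workload with a policy, a minimal NetworkPolicy whose podSelector matches a hypothetical app=my-app label on the workload's pod template (names and labels are assumptions, not taken from this framework):

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: my-app-netpol          # hypothetical name
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: my-app              # must match the workload's pod template labels
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: my-frontend     # hypothetical allowed client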
If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure_network_policy_configured_in_labels", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "description": "fails if no networkpolicy configured in workload labels", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# 
connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
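To illustrate the suggested alternative to hostPort, a minimal ClusterIP Service sketch that exposes a pod's containerPort without pinning a port on the node (all names and port numbers are hypothetical):

apiVersion: v1
kind: Service
metadata:
  name: my-app                 # hypothetical name
spec:
  type: ClusterIP              # or NodePort if node-level external access is truly required
  selector:
    app: my-app                # selects the workload's pods by label
  ports:
  - port: 80                   # port exposed by the Service
    targetPort: 8080           # containerPort on the pod; no hostPort needed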
The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "category": { + "name": "Network", + "id": "Cat-4" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. 
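A minimal sketch of the pod-level isolation settings this control inspects, with hypothetical names; both fields discussed below default to false and should stay that way unless strictly required:

apiVersion: v1
kind: Pod
metadata:
  name: isolated-pod           # hypothetical name
spec:
  hostPID: false               # keep the host process namespace isolated (default)
  hostIPC: false               # keep host IPC isolated (default)
  containers:
  - name: app
    image: registry.example.com/my-app:1.0   # hypothetical image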
The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). 
", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." + } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := 
data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
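A minimal sketch of the securityContext combination this test looks for, using hypothetical non-zero IDs at the pod level and an explicit container-level override:

apiVersion: v1
kind: Pod
metadata:
  name: non-root-pod           # hypothetical name
spec:
  securityContext:             # pod-level defaults, inherited by containers
    runAsNonRoot: true
    runAsUser: 1000            # any UID greater than 0
    runAsGroup: 3000           # any GID greater than 0
  containers:
  - name: app
    image: registry.example.com/my-app:1.0   # hypothetical image built to run as non-root
    securityContext:
      runAsNonRoot: true       # container-level setting takes precedence over the pod level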
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "non-root-containers", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" + } + ] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-allow-privilege-escalation", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = 
[]\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", 
[container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" + } + ] + }, + { + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
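For illustration, a hedged sketch of one way to satisfy this check: a seccomp profile defined at the pod level and capabilities dropped at the container level (names and image are assumptions; SELinux options or AppArmor annotations would be equally valid signals):

apiVersion: v1
kind: Pod
metadata:
  name: hardened-pod           # hypothetical name
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault     # use the container runtime's default seccomp profile
  containers:
  - name: app
    image: registry.example.com/my-app:1.0   # hypothetical image
    securityContext:
      capabilities:
        drop: ["ALL"]          # dropping capabilities is another hardening signal the rule accepts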
If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "linux-hardening", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = 
containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": 
path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0270", + "baseScore": 8.0, + "category": { + "name": "Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + 
"name": "resources-cpu-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0271", + "baseScore": 8.0, + "category": { + "name": 
"Workload", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + }, + "id": "Cat-5" + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-limits", + "attributes": {}, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0078", + "C-0236", + "C-0237", + "C-0045", + "C-0048", + "C-0257", + "C-0207", + "C-0034", + "C-0012", + "C-0041", + "C-0260", + "C-0044", + "C-0038", + "C-0046", + "C-0013", + "C-0016", + "C-0017", + "C-0055", + "C-0057", + "C-0270", + "C-0271" + ] +} \ No newline at end of file From 896ae45cb3e94960f4ff44355ead9b9f7d510695 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 7 Mar 
2024 14:40:16 +0200 Subject: [PATCH 131/195] rm roleref as reviewPath, return only subject to del Signed-off-by: YiscahLevySilas1 --- rules/workload-with-cluster-takeover-roles/raw.rego | 2 -- 1 file changed, 2 deletions(-) diff --git a/rules/workload-with-cluster-takeover-roles/raw.rego b/rules/workload-with-cluster-takeover-roles/raw.rego index 21b39d97a..8b1e1836b 100644 --- a/rules/workload-with-cluster-takeover-roles/raw.rego +++ b/rules/workload-with-cluster-takeover-roles/raw.rego @@ -27,7 +27,6 @@ deny[msga] { rolebinding.subjects[j].name == sa.metadata.name rolebinding.subjects[j].namespace == sa.metadata.namespace - reviewPath := "roleRef" deletePath := sprintf("subjects[%d]", [j]) msga := { @@ -42,7 +41,6 @@ deny[msga] { }, { "object": rolebinding, - "reviewPaths": [reviewPath], "deletePaths": [deletePath], }, { From ac0c3c5f492903e81a649839c9aaac0586388844 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 7 Mar 2024 17:17:32 +0200 Subject: [PATCH 132/195] rm roleref as reviewPath, return only subject to del Signed-off-by: YiscahLevySilas1 --- .../test/fail-wl-creates-pod/expected.json | 4 +--- .../test/fail-wl-gets-secrets/expected.json | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json index 1202efe09..664eb4239 100644 --- a/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-creates-pod/expected.json @@ -69,9 +69,7 @@ ] }, "failedPaths": null, - "reviewPaths": [ - "roleRef" - ], + "reviewPaths": null, "deletePaths": [ "subjects[1]" ], diff --git a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json index 968a65fff..f18ced8a9 100644 --- a/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json +++ b/rules/workload-with-cluster-takeover-roles/test/fail-wl-gets-secrets/expected.json @@ -69,9 +69,7 @@ ] }, "failedPaths": null, - "reviewPaths": [ - "roleRef" - ], + "reviewPaths": null, "deletePaths": [ "subjects[0]" ], From 1843e60ef0c6716385be521a6a06e427f7a5e441 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 10 Mar 2024 09:59:24 +0200 Subject: [PATCH 133/195] change control name Signed-off-by: YiscahLevySilas1 --- controls/C-0256-exposuretointernet.json | 2 +- frameworks/clusterscan.json | 2 +- frameworks/security.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/controls/C-0256-exposuretointernet.json b/controls/C-0256-exposuretointernet.json index 044528cc5..0abb72f5b 100644 --- a/controls/C-0256-exposuretointernet.json +++ b/controls/C-0256-exposuretointernet.json @@ -1,5 +1,5 @@ { - "name": "Exposure to internet", + "name": "External facing", "attributes": { "controlTypeTags": [ "security" diff --git a/frameworks/clusterscan.json b/frameworks/clusterscan.json index 4ae67e5d1..97353bb0d 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -113,7 +113,7 @@ { "controlID": "C-0256", "patch": { - "name": "Exposure to internet" + "name": "External facing" } }, { diff --git a/frameworks/security.json b/frameworks/security.json index a2928c2b4..e02f408f2 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -137,7 +137,7 @@ { "controlID": "C-0256", "patch": { - "name": "Exposure to 
Internet" + "name": "External facing" } }, { From 8c31f73e519821e437eed3d1f01578ebc1097d5f Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 12 Mar 2024 11:44:57 +0200 Subject: [PATCH 134/195] rm releaseDev dir Signed-off-by: YiscahLevySilas1 --- releaseDev/ControlID_RuleName.csv | 297 - releaseDev/FWName_CID_CName.csv | 475 - releaseDev/allcontrols.json | 4656 ---------- releaseDev/armobest.json | 3066 ------ releaseDev/attack_tracks.json | 109 - releaseDev/cis-aks-t1.2.0.json | 4282 --------- releaseDev/cis-eks-t1.2.0.json | 4456 --------- releaseDev/cis-v1.23-t1.0.1.json | 8583 ----------------- releaseDev/clusterscan.json | 1812 ---- releaseDev/controls.json | 7132 -------------- releaseDev/default_config_inputs.json | 145 - releaseDev/devopsbest.json | 1107 --- releaseDev/exceptions.json | 7820 ---------------- releaseDev/frameworks.json | 11764 ------------------------ releaseDev/mitre.json | 2112 ----- releaseDev/nsa.json | 2096 ----- releaseDev/rules.json | 8856 ------------------ releaseDev/security.json | 3407 ------- releaseDev/security_frameworks.json | 2569 ------ releaseDev/soc2.json | 537 -- releaseDev/workloadscan.json | 2021 ---- 21 files changed, 77302 deletions(-) delete mode 100644 releaseDev/ControlID_RuleName.csv delete mode 100644 releaseDev/FWName_CID_CName.csv delete mode 100644 releaseDev/allcontrols.json delete mode 100644 releaseDev/armobest.json delete mode 100644 releaseDev/attack_tracks.json delete mode 100644 releaseDev/cis-aks-t1.2.0.json delete mode 100644 releaseDev/cis-eks-t1.2.0.json delete mode 100644 releaseDev/cis-v1.23-t1.0.1.json delete mode 100644 releaseDev/clusterscan.json delete mode 100644 releaseDev/controls.json delete mode 100644 releaseDev/default_config_inputs.json delete mode 100644 releaseDev/devopsbest.json delete mode 100644 releaseDev/exceptions.json delete mode 100644 releaseDev/frameworks.json delete mode 100644 releaseDev/mitre.json delete mode 100644 releaseDev/nsa.json delete mode 100644 releaseDev/rules.json delete mode 100644 releaseDev/security.json delete mode 100644 releaseDev/security_frameworks.json delete mode 100644 releaseDev/soc2.json delete mode 100644 releaseDev/workloadscan.json diff --git a/releaseDev/ControlID_RuleName.csv b/releaseDev/ControlID_RuleName.csv deleted file mode 100644 index 2b780bf71..000000000 --- a/releaseDev/ControlID_RuleName.csv +++ /dev/null @@ -1,297 +0,0 @@ -ControlID,RuleName -C-0105,ensure-that-the-admin.conf-file-ownership-is-set-to-root-root -C-0108,ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive -C-0209,list-all-namespaces -C-0106,ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive -C-0084,exposed-rce-pods -C-0012,rule-credentials-in-env-var -C-0012,rule-credentials-configmap -C-0207,rule-secrets-in-env-var -C-0270,resources-cpu-limits -C-0124,ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used -C-0216,psp-deny-hostnetwork -C-0129,ensure-that-the-api-server-profiling-argument-is-set-to-false -C-0111,ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive -C-0267,workload-with-cluster-takeover-roles -C-0160,k8s-audit-logs-enabled-native-cis -C-0199,pod-security-admission-baseline-applied-1 -C-0199,pod-security-admission-baseline-applied-2 -C-0226,alert-container-optimized-os-not-in-use -C-0145,ensure-that-the-controller-manager-profiling-argument-is-set-to-false 
-C-0167,ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root -C-0151,ensure-that-the-scheduler-profiling-argument-is-set-to-false -C-0057,rule-privilege-escalation -C-0015,rule-can-list-get-secrets-v1 -C-0159,etcd-unique-ca -C-0134,ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate -C-0112,ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600 -C-0152,ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1 -C-0122,ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set -C-0004,resources-memory-limit-and-request -C-0204,pod-security-admission-baseline-applied-1 -C-0204,pod-security-admission-baseline-applied-2 -C-0070,enforce-kubelet-client-tls-authentication-updated -C-0102,ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive -C-0021,exposed-sensitive-interfaces-v1 -C-0103,ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd -C-0163,ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root -C-0002,exec-into-container-v1 -C-0213,psp-deny-privileged-container -C-0255,workload-mounted-secrets -C-0113,ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false -C-0240,rule-cni-enabled-aks -C-0153,etcd-tls-enabled -C-0030,ingress-and-egress-blocked -C-0063,rule-can-portforward-v1 -C-0059,nginx-ingress-snippet-annotation-vulnerability -C-0269,resources-memory-requests -C-0254,rule-manual -C-0197,pod-security-admission-restricted-applied-1 -C-0197,pod-security-admission-restricted-applied-2 -C-0133,ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate -C-0036,list-all-validating-webhooks -C-0248,ensure-clusters-are-created-with-private-nodes -C-0061,pods-in-default-namespace -C-0196,pod-security-admission-baseline-applied-1 -C-0196,pod-security-admission-baseline-applied-2 -C-0045,alert-rw-hostpath -C-0180,kubelet-event-qps -C-0217,psp-deny-allowprivilegeescalation -C-0046,insecure-capabilities -C-0130,ensure-that-the-api-server-audit-log-path-argument-is-set -C-0127,ensure-that-the-admission-control-plugin-NodeRestriction-is-set -C-0210,set-seccomp-profile-RuntimeDefault -C-0176,kubelet-streaming-connection-idle-timeout -C-0110,ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root -C-0089,CVE-2022-3172 -C-0273,outdated-k8s-version -C-0037,rule-can-update-configmap-v1 -C-0078,container-image-repository -C-0078,container-image-repository-v1 -C-0225,ensure-default-service-accounts-has-only-default-roles -C-0225,automount-default-service-account -C-0123,ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set -C-0227,ensure-endpointprivateaccess-is-enabled -C-0198,pod-security-admission-restricted-applied-1 -C-0198,pod-security-admission-restricted-applied-2 -C-0119,ensure-that-the-api-server-authorization-mode-argument-includes-Node -C-0251,list-role-definitions-in-acr -C-0044,container-hostPort -C-0238,Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive -C-0118,ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow -C-0233,alert-fargate-not-in-use -C-0052,instance-metadata-api-access -C-0035,rule-list-all-cluster-admins-v1 -C-0136,ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate -C-0115,ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set -C-0258,workload-mounted-configmap -C-0245,encrypt-traffic-to-https-load-balancers-with-tls-certificates 
-C-0235,ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive -C-0144,ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate -C-0107,ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root -C-0212,pods-in-default-namespace -C-0212,rolebinding-in-default-namespace -C-0212,role-in-default-namespace -C-0212,configmap-in-default-namespace -C-0212,endpoints-in-default-namespace -C-0212,persistentvolumeclaim-in-default-namespace -C-0212,podtemplate-in-default-namespace -C-0212,replicationcontroller-in-default-namespace -C-0212,service-in-default-namespace -C-0212,serviceaccount-in-default-namespace -C-0212,endpointslice-in-default-namespace -C-0212,horizontalpodautoscaler-in-default-namespace -C-0212,lease-in-default-namespace -C-0212,csistoragecapacity-in-default-namespace -C-0212,ingress-in-default-namespace -C-0212,poddisruptionbudget-in-default-namespace -C-0212,resources-secret-in-default-namespace -C-0001,rule-identify-blocklisted-image-registries -C-0001,rule-identify-blocklisted-image-registries-v1 -C-0262,anonymous-access-enabled -C-0214,psp-deny-hostpid -C-0128,ensure-that-the-api-server-secure-port-argument-is-not-set-to-0 -C-0093,ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root -C-0073,naked-pods -C-0104,ensure-that-the-admin.conf-file-permissions-are-set-to-600 -C-0069,anonymous-requests-to-kubelet-service-updated -C-0189,automount-default-service-account -C-0189,namespace-without-service-account -C-0109,ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root -C-0229,ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks -C-0188,rule-can-create-pod -C-0173,kubelet-authorization-mode-alwaysAllow -C-0141,ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate -C-0161,audit-policy-content -C-0234,ensure-external-secrets-storage-is-in-use -C-0100,ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive -C-0203,pod-security-admission-baseline-applied-1 -C-0203,pod-security-admission-baseline-applied-2 -C-0157,etcd-peer-client-auth-cert -C-0094,ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive -C-0181,validate-kubelet-tls-configuration-updated -C-0183,kubelet-rotate-kubelet-server-certificate -C-0168,ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive -C-0193,pod-security-admission-baseline-applied-1 -C-0193,pod-security-admission-baseline-applied-2 -C-0016,rule-allow-privilege-escalation -C-0087,CVE-2022-23648 -C-0256,exposure-to-internet -C-0099,ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root -C-0241,ensure-azure-rbac-is-set -C-0026,rule-deny-cronjobs -C-0165,if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root -C-0121,ensure-that-the-admission-control-plugin-EventRateLimit-is-set -C-0095,ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root -C-0055,linux-hardening -C-0182,kubelet-rotate-certificates -C-0075,image-pull-policy-is-not-set-to-always -C-0220,psp-required-drop-capabilities -C-0155,etcd-auto-tls-disabled -C-0260,ensure_network_policy_configured_in_labels -C-0263,ingress-no-tls -C-0162,ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive -C-0228,ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks 
-C-0201,pod-security-admission-restricted-applied-1 -C-0201,pod-security-admission-restricted-applied-2 -C-0247,restrict-access-to-the-control-plane-endpoint -C-0131,ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate -C-0042,rule-can-ssh-to-pod-v1 -C-0184,kubelet-strong-cryptographics-ciphers -C-0211,rule-privilege-escalation -C-0211,immutable-container-filesystem -C-0211,non-root-containers -C-0211,drop-capability-netraw -C-0211,set-seLinuxOptions -C-0211,set-seccomp-profile -C-0211,set-procmount-default -C-0211,set-fsgroup-value -C-0211,set-fsgroupchangepolicy-value -C-0211,set-sysctls-params -C-0211,set-supplementalgroups-values -C-0211,rule-allow-privilege-escalation -C-0067,k8s-audit-logs-enabled-cloud -C-0067,k8s-audit-logs-enabled-native -C-0066,secret-etcd-encryption-cloud -C-0066,etcd-encryption-native -C-0265,system-authenticated-allowed-to-take-over-cluster -C-0244,secret-etcd-encryption-cloud -C-0005,insecure-port-flag -C-0179,kubelet-hostname-override -C-0140,ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate -C-0007,rule-excessive-delete-rights-v1 -C-0221,ensure-image-scanning-enabled-cloud -C-0222,ensure-aws-policies-are-present -C-0147,ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate -C-0090,CVE-2022-39328 -C-0195,pod-security-admission-baseline-applied-1 -C-0195,pod-security-admission-baseline-applied-2 -C-0120,ensure-that-the-api-server-authorization-mode-argument-includes-RBAC -C-0191,rule-can-bind-escalate -C-0191,rule-can-impersonate-users-groups-v1 -C-0205,ensure-that-the-cni-in-use-supports-network-policies -C-0232,review-roles-with-aws-iam-authenticator -C-0246,rule-manual -C-0041,host-network-access -C-0208,external-secret-storage -C-0017,immutable-container-filesystem -C-0185,cluster-admin-role -C-0172,anonymous-requests-to-kubelet-service-updated -C-0218,psp-deny-root-container -C-0077,k8s-common-labels-usage -C-0200,pod-security-admission-restricted-applied-1 -C-0200,pod-security-admission-restricted-applied-2 -C-0268,resources-cpu-requests -C-0174,enforce-kubelet-client-tls-authentication-updated -C-0242,rule-hostile-multitenant-workloads -C-0261,serviceaccount-token-mount -C-0098,ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive -C-0135,ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true -C-0171,ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root -C-0170,if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive -C-0175,read-only-port-enabled-updated -C-0062,sudo-in-container-entrypoint -C-0034,automount-service-account -C-0154,etcd-client-auth-cert -C-0101,ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root -C-0009,resource-policies -C-0117,ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate -C-0206,internal-networking -C-0252,ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled -C-0137,ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate -C-0178,kubelet-ip-tables -C-0166,ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive -C-0083,exposed-critical-pods -C-0146,ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true -C-0192,pod-security-admission-applied-1 -C-0192,pod-security-admission-applied-2 
-C-0259,rule-credentials-in-env-var -C-0148,ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate -C-0239,ensure-default-service-accounts-has-only-default-roles -C-0236,verify-image-signature -C-0215,psp-deny-hostipc -C-0088,rbac-enabled-cloud -C-0088,rbac-enabled-native -C-0085,excessive_amount_of_vulnerabilities_pods -C-0243,ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider -C-0096,ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive -C-0257,workload-mounted-pvc -C-0194,pod-security-admission-baseline-applied-1 -C-0194,pod-security-admission-baseline-applied-2 -C-0190,automount-service-account -C-0038,host-pid-ipc-privileges -C-0116,ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate -C-0039,list-all-mutating-webhooks -C-0219,psp-deny-allowed-capabilities -C-0125,ensure-that-the-admission-control-plugin-ServiceAccount-is-set -C-0054,internal-networking -C-0272,workload-with-administrative-roles -C-0249,rule-manual -C-0048,alert-any-hostpath -C-0114,ensure-that-the-api-server-token-auth-file-parameter-is-not-set -C-0068,psp-enabled-cloud -C-0068,psp-enabled-native -C-0143,ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers -C-0081,CVE-2022-24348 -C-0079,CVE-2022-0185 -C-0250,ensure-service-principle-has-read-only-permissions -C-0091,CVE-2022-47633 -C-0018,configured-readiness-probe -C-0074,containers-mounting-docker-socket -C-0138,ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate -C-0164,if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive -C-0149,ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true -C-0177,kubelet-protect-kernel-defaults -C-0150,ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1 -C-0020,alert-mount-potential-credentials-paths -C-0014,rule-access-dashboard-subject-v1 -C-0014,rule-access-dashboard-wl-v1 -C-0186,rule-can-list-get-secrets-v1 -C-0264,pv-without-encryption -C-0187,rule-list-all-cluster-admins-v1 -C-0013,non-root-containers -C-0202,pod-security-admission-baseline-applied-1 -C-0202,pod-security-admission-baseline-applied-2 -C-0058,Symlink-Exchange-Can-Allow-Host-Filesystem-Access -C-0076,label-usage-for-resources -C-0050,resources-cpu-limit-and-request -C-0142,ensure-that-the-api-server-encryption-providers-are-appropriately-configured -C-0237,has-image-signature -C-0169,ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root -C-0156,etcd-peer-tls-enabled -C-0223,ensure_nodeinstancerole_has_right_permissions_for_ecr -C-0231,ensure-https-loadbalancers-encrypted-with-tls-aws -C-0132,ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate -C-0158,etcd-peer-auto-tls-disabled -C-0253,rule-identify-old-k8s-registry -C-0053,access-container-service-account-v1 -C-0056,configured-liveness-probe -C-0097,ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root -C-0230,ensure-network-policy-is-enabled-eks -C-0092,ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive -C-0049,internal-networking -C-0065,rule-can-impersonate-users-groups-v1 -C-0126,ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set -C-0031,rule-can-delete-k8s-events-v1 -C-0271,resources-memory-limits 
-C-0139,ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate diff --git a/releaseDev/FWName_CID_CName.csv b/releaseDev/FWName_CID_CName.csv deleted file mode 100644 index 466904a6e..000000000 --- a/releaseDev/FWName_CID_CName.csv +++ /dev/null @@ -1,475 +0,0 @@ -frameworkName,ControlID,ControlName -DevOpsBest,C-0018,Configured readiness probe -DevOpsBest,C-0044,Container hostPort -DevOpsBest,C-0056,Configured liveness probe -DevOpsBest,C-0061,Pods in default namespace -DevOpsBest,C-0073,Naked pods -DevOpsBest,C-0074,Container runtime socket mounted -DevOpsBest,C-0075,Image pull policy on latest tag -DevOpsBest,C-0076,Label usage for resources -DevOpsBest,C-0077,K8s common labels usage -DevOpsBest,C-0253,Deprecated Kubernetes image registry -DevOpsBest,C-0268,Ensure CPU requests are set -DevOpsBest,C-0269,Ensure memory requests are set -DevOpsBest,C-0270,Ensure CPU limits are set -DevOpsBest,C-0271,Ensure memory limits are set -AllControls,C-0002,Prevent containers from allowing command execution -AllControls,C-0005,API server insecure port is enabled -AllControls,C-0007,Roles with delete capabilities -AllControls,C-0012,Applications credentials in configuration files -AllControls,C-0013,Non-root containers -AllControls,C-0014,Access Kubernetes dashboard -AllControls,C-0015,List Kubernetes secrets -AllControls,C-0016,Allow privilege escalation -AllControls,C-0017,Immutable container filesystem -AllControls,C-0018,Configured readiness probe -AllControls,C-0020,Mount service principal -AllControls,C-0021,Exposed sensitive interfaces -AllControls,C-0026,Kubernetes CronJob -AllControls,C-0030,Ingress and Egress blocked -AllControls,C-0031,Delete Kubernetes events -AllControls,C-0034,Automatic mapping of service account -AllControls,C-0035,Administrative Roles -AllControls,C-0036,Validate admission controller (validating) -AllControls,C-0038,Host PID/IPC privileges -AllControls,C-0039,Validate admission controller (mutating) -AllControls,C-0041,HostNetwork access -AllControls,C-0042,SSH server running inside container -AllControls,C-0044,Container hostPort -AllControls,C-0045,Writable hostPath mount -AllControls,C-0046,Insecure capabilities -AllControls,C-0048,HostPath mount -AllControls,C-0049,Network mapping -AllControls,C-0052,Instance Metadata API -AllControls,C-0053,Access container service account -AllControls,C-0054,Cluster internal networking -AllControls,C-0055,Linux hardening -AllControls,C-0056,Configured liveness probe -AllControls,C-0057,Privileged container -AllControls,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. 
-AllControls,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability -AllControls,C-0061,Pods in default namespace -AllControls,C-0062,Sudo in container entrypoint -AllControls,C-0063,Portforwarding privileges -AllControls,C-0065,No impersonation -AllControls,C-0066,Secret/etcd encryption enabled -AllControls,C-0067,Audit logs enabled -AllControls,C-0068,PSP enabled -AllControls,C-0069,Disable anonymous access to Kubelet service -AllControls,C-0070,Enforce Kubelet client TLS authentication -AllControls,C-0073,Naked pods -AllControls,C-0074,Container runtime socket mounted -AllControls,C-0075,Image pull policy on latest tag -AllControls,C-0076,Label usage for resources -AllControls,C-0077,K8s common labels usage -AllControls,C-0078,Images from allowed registry -AllControls,C-0079,CVE-2022-0185-linux-kernel-container-escape -AllControls,C-0081,CVE-2022-24348-argocddirtraversal -AllControls,C-0087,CVE-2022-23648-containerd-fs-escape -AllControls,C-0088,RBAC enabled -AllControls,C-0090,CVE-2022-39328-grafana-auth-bypass -AllControls,C-0091,CVE-2022-47633-kyverno-signature-bypass -AllControls,C-0262,Anonymous user has RoleBinding -AllControls,C-0265,system:authenticated user has elevated roles -AllControls,C-0270,Ensure CPU limits are set -AllControls,C-0271,Ensure memory limits are set -cis-v1.23-t1.0.1,C-0092,Ensure that the API server pod specification file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0093,Ensure that the API server pod specification file ownership is set to root:root -cis-v1.23-t1.0.1,C-0094,Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0095,Ensure that the controller manager pod specification file ownership is set to root:root -cis-v1.23-t1.0.1,C-0096,Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0097,Ensure that the scheduler pod specification file ownership is set to root:root -cis-v1.23-t1.0.1,C-0098,Ensure that the etcd pod specification file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0099,Ensure that the etcd pod specification file ownership is set to root:root -cis-v1.23-t1.0.1,C-0100,Ensure that the Container Network Interface file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0101,Ensure that the Container Network Interface file ownership is set to root:root -cis-v1.23-t1.0.1,C-0102,Ensure that the etcd data directory permissions are set to 700 or more restrictive -cis-v1.23-t1.0.1,C-0103,Ensure that the etcd data directory ownership is set to etcd:etcd -cis-v1.23-t1.0.1,C-0104,Ensure that the admin.conf file permissions are set to 600 -cis-v1.23-t1.0.1,C-0105,Ensure that the admin.conf file ownership is set to root:root -cis-v1.23-t1.0.1,C-0106,Ensure that the scheduler.conf file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0107,Ensure that the scheduler.conf file ownership is set to root:root -cis-v1.23-t1.0.1,C-0108,Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0109,Ensure that the controller-manager.conf file ownership is set to root:root -cis-v1.23-t1.0.1,C-0110,Ensure that the Kubernetes PKI directory and file ownership is set to root:root -cis-v1.23-t1.0.1,C-0111,Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0112,Ensure that the Kubernetes PKI key file permissions are set to 600 
-cis-v1.23-t1.0.1,C-0113,Ensure that the API Server --anonymous-auth argument is set to false -cis-v1.23-t1.0.1,C-0114,Ensure that the API Server --token-auth-file parameter is not set -cis-v1.23-t1.0.1,C-0115,Ensure that the API Server --DenyServiceExternalIPs is not set -cis-v1.23-t1.0.1,C-0116,Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate -cis-v1.23-t1.0.1,C-0117,Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate -cis-v1.23-t1.0.1,C-0118,Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow -cis-v1.23-t1.0.1,C-0119,Ensure that the API Server --authorization-mode argument includes Node -cis-v1.23-t1.0.1,C-0120,Ensure that the API Server --authorization-mode argument includes RBAC -cis-v1.23-t1.0.1,C-0121,Ensure that the admission control plugin EventRateLimit is set -cis-v1.23-t1.0.1,C-0122,Ensure that the admission control plugin AlwaysAdmit is not set -cis-v1.23-t1.0.1,C-0123,Ensure that the admission control plugin AlwaysPullImages is set -cis-v1.23-t1.0.1,C-0124,Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used -cis-v1.23-t1.0.1,C-0125,Ensure that the admission control plugin ServiceAccount is set -cis-v1.23-t1.0.1,C-0126,Ensure that the admission control plugin NamespaceLifecycle is set -cis-v1.23-t1.0.1,C-0127,Ensure that the admission control plugin NodeRestriction is set -cis-v1.23-t1.0.1,C-0128,Ensure that the API Server --secure-port argument is not set to 0 -cis-v1.23-t1.0.1,C-0129,Ensure that the API Server --profiling argument is set to false -cis-v1.23-t1.0.1,C-0130,Ensure that the API Server --audit-log-path argument is set -cis-v1.23-t1.0.1,C-0131,Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate -cis-v1.23-t1.0.1,C-0132,Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate -cis-v1.23-t1.0.1,C-0133,Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate -cis-v1.23-t1.0.1,C-0134,Ensure that the API Server --request-timeout argument is set as appropriate -cis-v1.23-t1.0.1,C-0135,Ensure that the API Server --service-account-lookup argument is set to true -cis-v1.23-t1.0.1,C-0136,Ensure that the API Server --service-account-key-file argument is set as appropriate -cis-v1.23-t1.0.1,C-0137,Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate -cis-v1.23-t1.0.1,C-0138,Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate -cis-v1.23-t1.0.1,C-0139,Ensure that the API Server --client-ca-file argument is set as appropriate -cis-v1.23-t1.0.1,C-0140,Ensure that the API Server --etcd-cafile argument is set as appropriate -cis-v1.23-t1.0.1,C-0141,Ensure that the API Server --encryption-provider-config argument is set as appropriate -cis-v1.23-t1.0.1,C-0142,Ensure that encryption providers are appropriately configured -cis-v1.23-t1.0.1,C-0143,Ensure that the API Server only makes use of Strong Cryptographic Ciphers -cis-v1.23-t1.0.1,C-0144,Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate -cis-v1.23-t1.0.1,C-0145,Ensure that the Controller Manager --profiling argument is set to false -cis-v1.23-t1.0.1,C-0146,Ensure that the Controller Manager --use-service-account-credentials argument is set to true -cis-v1.23-t1.0.1,C-0147,Ensure that the Controller Manager 
--service-account-private-key-file argument is set as appropriate -cis-v1.23-t1.0.1,C-0148,Ensure that the Controller Manager --root-ca-file argument is set as appropriate -cis-v1.23-t1.0.1,C-0149,Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true -cis-v1.23-t1.0.1,C-0150,Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1 -cis-v1.23-t1.0.1,C-0151,Ensure that the Scheduler --profiling argument is set to false -cis-v1.23-t1.0.1,C-0152,Ensure that the Scheduler --bind-address argument is set to 127.0.0.1 -cis-v1.23-t1.0.1,C-0153,Ensure that the --cert-file and --key-file arguments are set as appropriate -cis-v1.23-t1.0.1,C-0154,Ensure that the --client-cert-auth argument is set to true -cis-v1.23-t1.0.1,C-0155,Ensure that the --auto-tls argument is not set to true -cis-v1.23-t1.0.1,C-0156,Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate -cis-v1.23-t1.0.1,C-0157,Ensure that the --peer-client-cert-auth argument is set to true -cis-v1.23-t1.0.1,C-0158,Ensure that the --peer-auto-tls argument is not set to true -cis-v1.23-t1.0.1,C-0159,Ensure that a unique Certificate Authority is used for etcd -cis-v1.23-t1.0.1,C-0160,Ensure that a minimal audit policy is created -cis-v1.23-t1.0.1,C-0161,Ensure that the audit policy covers key security concerns -cis-v1.23-t1.0.1,C-0162,Ensure that the kubelet service file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0163,Ensure that the kubelet service file ownership is set to root:root -cis-v1.23-t1.0.1,C-0164,If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0165,If proxy kubeconfig file exists ensure ownership is set to root:root -cis-v1.23-t1.0.1,C-0166,Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root -cis-v1.23-t1.0.1,C-0168,Ensure that the certificate authorities file permissions are set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0169,Ensure that the client certificate authorities file ownership is set to root:root -cis-v1.23-t1.0.1,C-0170,If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive -cis-v1.23-t1.0.1,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root -cis-v1.23-t1.0.1,C-0172,Ensure that the --anonymous-auth argument is set to false -cis-v1.23-t1.0.1,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow -cis-v1.23-t1.0.1,C-0174,Ensure that the --client-ca-file argument is set as appropriate -cis-v1.23-t1.0.1,C-0175,Verify that the --read-only-port argument is set to 0 -cis-v1.23-t1.0.1,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0 -cis-v1.23-t1.0.1,C-0177,Ensure that the --protect-kernel-defaults argument is set to true -cis-v1.23-t1.0.1,C-0178,Ensure that the --make-iptables-util-chains argument is set to true -cis-v1.23-t1.0.1,C-0179,Ensure that the --hostname-override argument is not set -cis-v1.23-t1.0.1,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture -cis-v1.23-t1.0.1,C-0181,Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate -cis-v1.23-t1.0.1,C-0182,Ensure that the --rotate-certificates argument is not set to false 
-cis-v1.23-t1.0.1,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true -cis-v1.23-t1.0.1,C-0184,Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers -cis-v1.23-t1.0.1,C-0185,Ensure that the cluster-admin role is only used where required -cis-v1.23-t1.0.1,C-0186,Minimize access to secrets -cis-v1.23-t1.0.1,C-0187,Minimize wildcard use in Roles and ClusterRoles -cis-v1.23-t1.0.1,C-0188,Minimize access to create pods -cis-v1.23-t1.0.1,C-0189,Ensure that default service accounts are not actively used -cis-v1.23-t1.0.1,C-0190,Ensure that Service Account Tokens are only mounted where necessary -cis-v1.23-t1.0.1,C-0191,"Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster" -cis-v1.23-t1.0.1,C-0192,Ensure that the cluster has at least one active policy control mechanism in place -cis-v1.23-t1.0.1,C-0193,Minimize the admission of privileged containers -cis-v1.23-t1.0.1,C-0194,Minimize the admission of containers wishing to share the host process ID namespace -cis-v1.23-t1.0.1,C-0195,Minimize the admission of containers wishing to share the host IPC namespace -cis-v1.23-t1.0.1,C-0196,Minimize the admission of containers wishing to share the host network namespace -cis-v1.23-t1.0.1,C-0197,Minimize the admission of containers with allowPrivilegeEscalation -cis-v1.23-t1.0.1,C-0198,Minimize the admission of root containers -cis-v1.23-t1.0.1,C-0199,Minimize the admission of containers with the NET_RAW capability -cis-v1.23-t1.0.1,C-0200,Minimize the admission of containers with added capabilities -cis-v1.23-t1.0.1,C-0201,Minimize the admission of containers with capabilities assigned -cis-v1.23-t1.0.1,C-0202,Minimize the admission of Windows HostProcess Containers -cis-v1.23-t1.0.1,C-0203,Minimize the admission of HostPath volumes -cis-v1.23-t1.0.1,C-0204,Minimize the admission of containers which use HostPorts -cis-v1.23-t1.0.1,C-0205,Ensure that the CNI in use supports Network Policies -cis-v1.23-t1.0.1,C-0206,Ensure that all Namespaces have Network Policies defined -cis-v1.23-t1.0.1,C-0207,Prefer using secrets as files over secrets as environment variables -cis-v1.23-t1.0.1,C-0208,Consider external secret storage -cis-v1.23-t1.0.1,C-0209,Create administrative boundaries between resources using namespaces -cis-v1.23-t1.0.1,C-0210,Ensure that the seccomp profile is set to docker/default in your pod definitions -cis-v1.23-t1.0.1,C-0211,Apply Security Context to Your Pods and Containers -cis-v1.23-t1.0.1,C-0212,The default namespace should not be used -SOC2,C-0260,Missing network policy -SOC2,C-0012,Applications credentials in configuration files -SOC2,C-0186,Minimize access to secrets -SOC2,C-0035,Administrative Roles -SOC2,C-0067,Audit logs enabled -SOC2,C-0263,Ingress uses TLS -MITRE,C-0002,Prevent containers from allowing command execution -MITRE,C-0007,Roles with delete capabilities -MITRE,C-0012,Applications credentials in configuration files -MITRE,C-0014,Access Kubernetes dashboard -MITRE,C-0015,List Kubernetes secrets -MITRE,C-0020,Mount service principal -MITRE,C-0021,Exposed sensitive interfaces -MITRE,C-0026,Kubernetes CronJob -MITRE,C-0031,Delete Kubernetes events -MITRE,C-0035,Administrative Roles -MITRE,C-0036,Validate admission controller (validating) -MITRE,C-0037,CoreDNS poisoning -MITRE,C-0039,Validate admission controller (mutating) -MITRE,C-0042,SSH server running inside container -MITRE,C-0045,Writable hostPath mount -MITRE,C-0048,HostPath mount -MITRE,C-0052,Instance Metadata API -MITRE,C-0053,Access 
container service account
-MITRE,C-0054,Cluster internal networking
-MITRE,C-0057,Privileged container
-MITRE,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access.
-MITRE,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability
-MITRE,C-0066,Secret/etcd encryption enabled
-MITRE,C-0067,Audit logs enabled
-MITRE,C-0068,PSP enabled
-MITRE,C-0069,Disable anonymous access to Kubelet service
-MITRE,C-0070,Enforce Kubelet client TLS authentication
-NSA,C-0002,Prevent containers from allowing command execution
-NSA,C-0005,API server insecure port is enabled
-NSA,C-0012,Applications credentials in configuration files
-NSA,C-0013,Non-root containers
-NSA,C-0016,Allow privilege escalation
-NSA,C-0017,Immutable container filesystem
-NSA,C-0030,Ingress and Egress blocked
-NSA,C-0034,Automatic mapping of service account
-NSA,C-0035,Administrative Roles
-NSA,C-0038,Host PID/IPC privileges
-NSA,C-0041,HostNetwork access
-NSA,C-0044,Container hostPort
-NSA,C-0046,Insecure capabilities
-NSA,C-0054,Cluster internal networking
-NSA,C-0055,Linux hardening
-NSA,C-0057,Privileged container
-NSA,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access.
-NSA,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability
-NSA,C-0066,Secret/etcd encryption enabled
-NSA,C-0067,Audit logs enabled
-NSA,C-0068,PSP enabled
-NSA,C-0069,Disable anonymous access to Kubelet service
-NSA,C-0070,Enforce Kubelet client TLS authentication
-NSA,C-0270,Ensure CPU limits are set
-NSA,C-0271,Ensure memory limits are set
-cis-eks-t1.2.0,C-0066,Secret/etcd encryption enabled
-cis-eks-t1.2.0,C-0067,Audit logs enabled
-cis-eks-t1.2.0,C-0078,Images from allowed registry
-cis-eks-t1.2.0,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root
-cis-eks-t1.2.0,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root
-cis-eks-t1.2.0,C-0172,Ensure that the --anonymous-auth argument is set to false
-cis-eks-t1.2.0,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow
-cis-eks-t1.2.0,C-0174,Ensure that the --client-ca-file argument is set as appropriate
-cis-eks-t1.2.0,C-0175,Verify that the --read-only-port argument is set to 0
-cis-eks-t1.2.0,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0
-cis-eks-t1.2.0,C-0177,Ensure that the --protect-kernel-defaults argument is set to true
-cis-eks-t1.2.0,C-0178,Ensure that the --make-iptables-util-chains argument is set to true
-cis-eks-t1.2.0,C-0179,Ensure that the --hostname-override argument is not set
-cis-eks-t1.2.0,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture
-cis-eks-t1.2.0,C-0181,Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate
-cis-eks-t1.2.0,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true
-cis-eks-t1.2.0,C-0185,Ensure that the cluster-admin role is only used where required
-cis-eks-t1.2.0,C-0186,Minimize access to secrets
-cis-eks-t1.2.0,C-0187,Minimize wildcard use in Roles and ClusterRoles
-cis-eks-t1.2.0,C-0188,Minimize access to create pods
-cis-eks-t1.2.0,C-0189,Ensure that default service accounts are not actively used
-cis-eks-t1.2.0,C-0190,Ensure that Service Account Tokens are only mounted where necessary
-cis-eks-t1.2.0,C-0191,"Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster"
-cis-eks-t1.2.0,C-0205,Ensure that the CNI in use supports Network Policies
-cis-eks-t1.2.0,C-0206,Ensure that all Namespaces have Network Policies defined
-cis-eks-t1.2.0,C-0207,Prefer using secrets as files over secrets as environment variables
-cis-eks-t1.2.0,C-0209,Create administrative boundaries between resources using namespaces
-cis-eks-t1.2.0,C-0211,Apply Security Context to Your Pods and Containers
-cis-eks-t1.2.0,C-0212,The default namespace should not be used
-cis-eks-t1.2.0,C-0213,Minimize the admission of privileged containers
-cis-eks-t1.2.0,C-0214,Minimize the admission of containers wishing to share the host process ID namespace
-cis-eks-t1.2.0,C-0215,Minimize the admission of containers wishing to share the host IPC namespace
-cis-eks-t1.2.0,C-0216,Minimize the admission of containers wishing to share the host network namespace
-cis-eks-t1.2.0,C-0217,Minimize the admission of containers with allowPrivilegeEscalation
-cis-eks-t1.2.0,C-0218,Minimize the admission of root containers
-cis-eks-t1.2.0,C-0219,Minimize the admission of containers with added capabilities
-cis-eks-t1.2.0,C-0220,Minimize the admission of containers with capabilities assigned
-cis-eks-t1.2.0,C-0221,Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider
-cis-eks-t1.2.0,C-0222,Minimize user access to Amazon ECR
-cis-eks-t1.2.0,C-0223,Minimize cluster access to read-only for Amazon ECR
-cis-eks-t1.2.0,C-0225,Prefer using dedicated EKS Service Accounts
-cis-eks-t1.2.0,C-0226,Prefer using a container-optimized OS when possible
-cis-eks-t1.2.0,C-0227,Restrict Access to the Control Plane Endpoint
-cis-eks-t1.2.0,C-0228,Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled
-cis-eks-t1.2.0,C-0229,Ensure clusters are created with Private Nodes
-cis-eks-t1.2.0,C-0230,Ensure Network Policy is Enabled and set as appropriate
-cis-eks-t1.2.0,C-0231,Encrypt traffic to HTTPS load balancers with TLS certificates
-cis-eks-t1.2.0,C-0232,Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156
-cis-eks-t1.2.0,C-0233,Consider Fargate for running untrusted workloads
-cis-eks-t1.2.0,C-0234,Consider external secret storage
-cis-eks-t1.2.0,C-0235,Ensure that the kubelet configuration file has permissions set to 644 or more restrictive
-cis-eks-t1.2.0,C-0238,Ensure that the kubeconfig file permissions are set to 644 or more restrictive
-cis-eks-t1.2.0,C-0242,Hostile multi-tenant workloads
-cis-eks-t1.2.0,C-0246,Avoid use of system:masters group
-cis-aks-t1.2.0,C-0078,Images from allowed registry
-cis-aks-t1.2.0,C-0088,RBAC enabled
-cis-aks-t1.2.0,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root
-cis-aks-t1.2.0,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root
-cis-aks-t1.2.0,C-0172,Ensure that the --anonymous-auth argument is set to false
-cis-aks-t1.2.0,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow
-cis-aks-t1.2.0,C-0174,Ensure that the --client-ca-file argument is set as appropriate
-cis-aks-t1.2.0,C-0175,Verify that the --read-only-port argument is set to 0
-cis-aks-t1.2.0,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0
-cis-aks-t1.2.0,C-0177,Ensure that the --protect-kernel-defaults argument is set to true
-cis-aks-t1.2.0,C-0178,Ensure that the --make-iptables-util-chains argument is set to true
-cis-aks-t1.2.0,C-0179,Ensure that the --hostname-override argument is not set
-cis-aks-t1.2.0,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture
-cis-aks-t1.2.0,C-0182,Ensure that the --rotate-certificates argument is not set to false
-cis-aks-t1.2.0,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true
-cis-aks-t1.2.0,C-0185,Ensure that the cluster-admin role is only used where required
-cis-aks-t1.2.0,C-0186,Minimize access to secrets
-cis-aks-t1.2.0,C-0187,Minimize wildcard use in Roles and ClusterRoles
-cis-aks-t1.2.0,C-0188,Minimize access to create pods
-cis-aks-t1.2.0,C-0189,Ensure that default service accounts are not actively used
-cis-aks-t1.2.0,C-0190,Ensure that Service Account Tokens are only mounted where necessary
-cis-aks-t1.2.0,C-0201,Minimize the admission of containers with capabilities assigned
-cis-aks-t1.2.0,C-0205,Ensure that the CNI in use supports Network Policies
-cis-aks-t1.2.0,C-0206,Ensure that all Namespaces have Network Policies defined
-cis-aks-t1.2.0,C-0207,Prefer using secrets as files over secrets as environment variables
-cis-aks-t1.2.0,C-0208,Consider external secret storage
-cis-aks-t1.2.0,C-0209,Create administrative boundaries between resources using namespaces
-cis-aks-t1.2.0,C-0211,Apply Security Context to Your Pods and Containers
-cis-aks-t1.2.0,C-0212,The default namespace should not be used
-cis-aks-t1.2.0,C-0213,Minimize the admission of privileged containers
-cis-aks-t1.2.0,C-0214,Minimize the admission of containers wishing to share the host process ID namespace
-cis-aks-t1.2.0,C-0215,Minimize the admission of containers wishing to share the host IPC namespace
-cis-aks-t1.2.0,C-0216,Minimize the admission of containers wishing to share the host network namespace
-cis-aks-t1.2.0,C-0217,Minimize the admission of containers with allowPrivilegeEscalation
-cis-aks-t1.2.0,C-0218,Minimize the admission of root containers
-cis-aks-t1.2.0,C-0219,Minimize the admission of containers with added capabilities
-cis-aks-t1.2.0,C-0235,Ensure that the kubelet configuration file has permissions set to 644 or more restrictive
-cis-aks-t1.2.0,C-0238,Ensure that the kubeconfig file permissions are set to 644 or more restrictive
-cis-aks-t1.2.0,C-0239,Prefer using dedicated AKS Service Accounts
-cis-aks-t1.2.0,C-0240,Ensure Network Policy is Enabled and set as appropriate
-cis-aks-t1.2.0,C-0241,Use Azure RBAC for Kubernetes Authorization.
-cis-aks-t1.2.0,C-0242,Hostile multi-tenant workloads
-cis-aks-t1.2.0,C-0243,Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider
-cis-aks-t1.2.0,C-0244,Ensure Kubernetes Secrets are encrypted
-cis-aks-t1.2.0,C-0245,Encrypt traffic to HTTPS load balancers with TLS certificates
-cis-aks-t1.2.0,C-0247,Restrict Access to the Control Plane Endpoint
-cis-aks-t1.2.0,C-0248,Ensure clusters are created with Private Nodes
-cis-aks-t1.2.0,C-0249,Restrict untrusted workloads
-cis-aks-t1.2.0,C-0250,Minimize cluster access to read-only for Azure Container Registry (ACR)
-cis-aks-t1.2.0,C-0251,Minimize user access to Azure Container Registry (ACR)
-cis-aks-t1.2.0,C-0252,Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled
-cis-aks-t1.2.0,C-0254,Enable audit Logs
-ArmoBest,C-0002,Prevent containers from allowing command execution
-ArmoBest,C-0005,API server insecure port is enabled
-ArmoBest,C-0012,Applications credentials in configuration files
-ArmoBest,C-0013,Non-root containers
-ArmoBest,C-0016,Allow privilege escalation
-ArmoBest,C-0017,Immutable container filesystem
-ArmoBest,C-0030,Ingress and Egress blocked
-ArmoBest,C-0034,Automatic mapping of service account
-ArmoBest,C-0035,Administrative Roles
-ArmoBest,C-0038,Host PID/IPC privileges
-ArmoBest,C-0041,HostNetwork access
-ArmoBest,C-0044,Container hostPort
-ArmoBest,C-0046,Insecure capabilities
-ArmoBest,C-0049,Network mapping
-ArmoBest,C-0054,Cluster internal networking
-ArmoBest,C-0055,Linux hardening
-ArmoBest,C-0057,Privileged container
-ArmoBest,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access.
-ArmoBest,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability
-ArmoBest,C-0061,Pods in default namespace
-ArmoBest,C-0062,Sudo in container entrypoint
-ArmoBest,C-0063,Portforwarding privileges
-ArmoBest,C-0065,No impersonation
-ArmoBest,C-0066,Secret/etcd encryption enabled
-ArmoBest,C-0067,Audit logs enabled
-ArmoBest,C-0068,PSP enabled
-ArmoBest,C-0069,Disable anonymous access to Kubelet service
-ArmoBest,C-0070,Enforce Kubelet client TLS authentication
-ArmoBest,C-0078,Images from allowed registry
-ArmoBest,C-0079,CVE-2022-0185-linux-kernel-container-escape
-ArmoBest,C-0081,CVE-2022-24348-argocddirtraversal
-ArmoBest,C-0087,CVE-2022-23648-containerd-fs-escape
-ArmoBest,C-0089,CVE-2022-3172-aggregated-API-server-redirect
-ArmoBest,C-0091,CVE-2022-47633-kyverno-signature-bypass
-ArmoBest,C-0236,Verify image signature
-ArmoBest,C-0237,Check if signature exists
-ArmoBest,C-0270,Ensure CPU limits are set
-ArmoBest,C-0271,Ensure memory limits are set
-WorkloadScan,C-0078,Images from allowed registry
-WorkloadScan,C-0236,Verify image signature
-WorkloadScan,C-0237,Check if signature exists
-WorkloadScan,C-0045,Writable hostPath mount
-WorkloadScan,C-0048,HostPath mount
-WorkloadScan,C-0257,Workload with PVC access
-WorkloadScan,C-0207,Prefer using secrets as files over secrets as environment variables
-WorkloadScan,C-0034,Automatic mapping of service account
-WorkloadScan,C-0012,Applications credentials in configuration files
-WorkloadScan,C-0041,HostNetwork access
-WorkloadScan,C-0260,Missing network policy
-WorkloadScan,C-0044,Container hostPort
-WorkloadScan,C-0038,Host PID/IPC privileges
-WorkloadScan,C-0046,Insecure capabilities
-WorkloadScan,C-0013,Non-root containers
-WorkloadScan,C-0016,Allow privilege escalation
-WorkloadScan,C-0017,Immutable container filesystem
-WorkloadScan,C-0055,Linux hardening
-WorkloadScan,C-0057,Privileged container -WorkloadScan,C-0270,Ensure CPU limits are set -WorkloadScan,C-0271,Ensure memory limits are set -security,C-0005,API server insecure port is enabled -security,C-0012,Applications credentials in configuration files -security,C-0013,Non-root containers -security,C-0016,Allow privilege escalation -security,C-0017,Immutable container filesystem -security,C-0034,Automatic mapping of service account -security,C-0035,Administrative Roles -security,C-0038,Host PID/IPC privileges -security,C-0041,HostNetwork access -security,C-0044,Container hostPort -security,C-0045,Writable hostPath mount -security,C-0046,Insecure capabilities -security,C-0048,HostPath mount -security,C-0057,Privileged container -security,C-0066,Secret/etcd encryption enabled -security,C-0069,Disable anonymous access to Kubelet service -security,C-0070,Enforce Kubelet client TLS authentication -security,C-0074,Container runtime socket mounted -security,C-0211,Apply Security Context to Your Pods and Containers -security,C-0255,Workload with secret access -security,C-0256,Exposure to internet -security,C-0257,Workload with PVC access -security,C-0258,Workload with ConfigMap access -security,C-0259,Workload with credential access -security,C-0260,Missing network policy -security,C-0261,ServiceAccount token mounted -security,C-0262,Anonymous user has RoleBinding -security,C-0265,system:authenticated user has elevated roles -security,C-0267,Workload with cluster takeover roles -security,C-0270,Ensure CPU limits are set -security,C-0271,Ensure memory limits are set -security,C-0272,Workload with administrative roles -security,C-0273,Outdated Kubernetes version -ClusterScan,C-0066,Secret/etcd encryption enabled -ClusterScan,C-0088,RBAC enabled -ClusterScan,C-0067,Audit logs enabled -ClusterScan,C-0005,API server insecure port is enabled -ClusterScan,C-0262,Anonymous user has RoleBinding -ClusterScan,C-0265,system:authenticated user has elevated roles -ClusterScan,C-0015,List Kubernetes secrets -ClusterScan,C-0002,Prevent containers from allowing command execution -ClusterScan,C-0007,Roles with delete capabilities -ClusterScan,C-0063,Portforwarding privileges -ClusterScan,C-0036,Validate admission controller (validating) -ClusterScan,C-0039,Validate admission controller (mutating) -ClusterScan,C-0035,Administrative Roles -ClusterScan,C-0188,Minimize access to create pods -ClusterScan,C-0187,Minimize wildcard use in Roles and ClusterRoles -ClusterScan,C-0012,Applications credentials in configuration files -ClusterScan,C-0260,Missing network policy -ClusterScan,C-0256,Exposure to internet -ClusterScan,C-0038,Host PID/IPC privileges -ClusterScan,C-0041,HostNetwork access -ClusterScan,C-0048,HostPath mount -ClusterScan,C-0057,Privileged container -ClusterScan,C-0013,Non-root containers diff --git a/releaseDev/allcontrols.json b/releaseDev/allcontrols.json deleted file mode 100644 index fb7059b5f..000000000 --- a/releaseDev/allcontrols.json +++ /dev/null @@ -1,4656 +0,0 @@ -{ - "name": "AllControls", - "description": "Contains all the controls from all the frameworks", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - 
"description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service 
accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "insecure-port-flag", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Roles with delete capabilities", - "attributes": 
{ - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-excessive-delete-rights-v1", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - } - ] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-access-dashboard-subject-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" - }, - { - "name": "rule-access-dashboard-wl-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, 
\"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. 
", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. 
", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": 
fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - } - ] - }, - { - "name": "Configured readiness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "configured-readiness-probe", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Readiness probe is not configured", - "remediation": "Ensure Readiness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = 
wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Mount service principal", - "attributes": { - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mount to known cloud credentials folders or files .", - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-mount-potential-credentials-paths", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "relevantCloudProviders": [ - "EKS", - "GKE", - "AKS" - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tstart_of_path := volumes_data[\"start_of_path\"]\n result := is_unsafe_paths(volume, start_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"start_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"start_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, start_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [start_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" - } - ] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. 
Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", - "controlID": "C-0021", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "exposed-sensitive-interfaces-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.sensitiveInterfaces" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveInterfaces", - "name": "Sensitive interfaces", - "description": "List of known software interfaces that should not generally be exposed to the Internet." - } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" - } - ] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes 
CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rule-deny-cronjobs", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob" - }, - "ruleLanguage": "rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if it's cronjob", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# alert cronjobs\n\n# handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ingress-and-egress-blocked", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, 
networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" - } - ] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-delete-k8s-events-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. 
Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- 
\nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Validate admission controller (validating)", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. 
Use the exception mechanism to prevent repetitive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-validating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Validate admission controller" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns validating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC privileges from the yaml file(s) unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "Validate admission controller (mutating)", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-mutating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Validate admission controller" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns mutating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in the AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to the host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
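For illustration, a minimal pod spec that follows this remediation and keeps host networking disabled might look like the following (the name and image are placeholders, not taken from this control's bundled example):

    apiVersion: v1
    kind: Pod
    metadata:
      name: no-host-network   # hypothetical name
    spec:
      hostNetwork: false      # explicit; omitting the field has the same effect, since false is the default
      containers:
      - name: app
        image: nginx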
Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "SSH server running inside container", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "long_description": "SSH server that is running inside a container may be used by attackers. 
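As a sketch of what this control's rule matches, a Service exposing an SSH port toward a workload could look like the following (name and selector are hypothetical); a Service whose port or targetPort is 22 or 2222 and whose selector matches the workload's labels is what raises the alert:

    apiVersion: v1
    kind: Service
    metadata:
      name: debug-ssh        # hypothetical
    spec:
      selector:
        app: my-app          # must match the workload's labels for the rule to correlate them
      ports:
      - port: 22             # SSH port picked up by the rule
        targetPort: 22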
If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", - "controlID": "C-0042", - "baseScore": 3.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-ssh-to-pod-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == 
wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := 
is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
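Where a hostPath volume cannot be removed, the remediation described here (readOnly: true on the volume mount, which is also what the rule's fixPaths suggest) looks roughly like this; names and the host path are placeholders:

    apiVersion: v1
    kind: Pod
    metadata:
      name: host-logs-reader   # hypothetical
    spec:
      containers:
      - name: app
        image: busybox
        volumeMounts:
        - name: host-logs
          mountPath: /host-logs
          readOnly: true       # the fix the rule proposes for writable hostPath mounts
      volumes:
      - name: host-logs
        hostPath:
          path: /var/log       # illustrative host directory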
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := 
container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n}" - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." 
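As a hedged illustration of the remediation (not this control's bundled example file), a container securityContext that avoids adding capabilities from the configured blacklist and drops everything it does not need might look like:

    apiVersion: v1
    kind: Pod
    metadata:
      name: minimal-caps       # hypothetical
    spec:
      containers:
      - name: app
        image: nginx
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE   # keep only what the workload truly needs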
- } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = 
sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" - } - ] - }, - { - "name": "Network mapping", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Instance Metadata API", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container may query the metadata API service to get information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http://169.254.169.254/metadata/instance?api-version=2019-06-01", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "instance-metadata-api-access", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "cloudProviderInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Checks if there is access from the nodes to cloud providers instance metadata services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" - } - ] - }, - { - "name": "Access container service account", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with the Kubernetes API server. All pods with an SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. 
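A minimal sketch of the least-privilege remediation mentioned above: opting a pod out of the automatic SA token mount. The name and image are placeholders; the same field also exists on the ServiceAccount object:

    apiVersion: v1
    kind: Pod
    metadata:
      name: no-sa-token        # hypothetical
    spec:
      automountServiceAccountToken: false   # the pod does not get the SA token mounted
      containers:
      - name: app
        image: nginx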
If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "access-container-service-account-v1", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" - } - ] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is 
defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect the cluster network.", - "long_description": "Kubernetes networking allows traffic between pods in the cluster by default. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommended, where possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. 
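A rough example of one of the hardening options this control looks for: a RuntimeDefault seccomp profile at the pod level plus dropped capabilities on the container (names and image are placeholders):

    apiVersion: v1
    kind: Pod
    metadata:
      name: hardened-pod       # hypothetical
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault # pod-level seccomp, one of the fields the rule checks
      containers:
      - name: app
        image: nginx
        securityContext:
          capabilities:
            drop:
            - ALL              # dropping capabilities also satisfies the check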
", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "linux-hardening", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, 
field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - } - ] - }, - { - "name": "Configured liveness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", - "controlID": "C-0056", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "configured-liveness-probe", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Liveness probe is not configured", - "remediation": "Ensure Liveness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if 
container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
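For contrast with the failing example embedded in this control, the remediation it describes amounts to a securityContext along these lines (illustrative only; the name mirrors the control's sample):

    apiVersion: v1
    kind: Pod
    metadata:
      name: unprivileged       # hypothetical
    spec:
      containers:
      - name: pause
        image: k8s.gcr.io/pause
        securityContext:
          privileged: false    # or omit the field entirely; false is the default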
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 
10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. 
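To make the subPath condition concrete, here is a minimal sketch (illustrative names only, not taken from the rule's test data) of a volume mount that the C-0058 rule would report when the node runs one of the vulnerable kubelet versions:

apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo            # placeholder name
spec:
  containers:
  - name: app
    image: busybox
    volumeMounts:
    - name: data
      mountPath: /data
      subPath: inner            # the rule matches any volumeMount that sets subPath
  volumes:
  - name: data
    emptyDir: {}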
", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - } - ] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. 
Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := 
{\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - } - ] - }, - { - "name": "Pods in default namespace", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", - "remediation": "Create necessary namespaces and move all the pods from default namespace there.", - "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the pods running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
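A small before-and-after sketch for the sudo check described above (image and commands are illustrative): the rule simply looks for the string "sudo" in a container's command, so the fix is to drop it and grant any required privileges through Kubernetes-native controls instead:

# flagged: "sudo" appears in the entrypoint command
apiVersion: v1
kind: Pod
metadata:
  name: sudo-demo               # placeholder name
spec:
  containers:
  - name: app
    image: ubuntu
    command: ["sudo", "touch", "/var/lock/demo.lock"]
---
# not flagged: same task without sudo
apiVersion: v1
kind: Pod
metadata:
  name: sudo-demo-fixed
spec:
  containers:
  - name: app
    image: ubuntu
    command: ["touch", "/tmp/demo.lock"]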
This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "sudo-in-container-entrypoint", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, start_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. 
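As an illustration of what the port-forwarding query matches (role and namespace names are placeholders, not taken from the rule), a Role like the following, once bound to a subject, would be reported because it grants the "create" verb on the "pods/portforward" subresource:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: port-forwarder              # placeholder name
  namespace: demo                   # placeholder namespace
rules:
- apiGroups: [""]                   # "" and "*" are both matched by the rule
  resources: ["pods/portforward"]   # "pods/*" and "*" are matched as well
  verbs: ["create"]                 # "*" is matched as well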
Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit the \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have the relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside the target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods \u2013 i.e., whether they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-portforward-v1", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "No impersonation", - "attributes": { - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, 
k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == 
\"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Audit logs enabled", - 
"attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot 
all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading 
the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Naked pods", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have corresponding parental object.", - "remediation": "Create necessary Deployment object for every pod making any pod a first class citizen in your IaC architecture.", - "long_description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have corresponding parental object.", - "test": "Test if pods are not associated with Deployment, ReplicaSet etc. If not, fail.", - "controlID": "C-0073", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "naked-pods", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", - "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. 
Example command: kubectl create deployment nginx-depl --image=nginx:1.19", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Container runtime socket mounted", - "attributes": { - "controlTypeTags": [ - "devops", - "smartRemediation" - ] - }, - "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", - "remediation": "Remove container runtime socket mount request or define an exception.", - "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", - "test": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "containers-mounting-docker-socket", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Check hostpath. 
If the path is set to one of the container runtime sockets, the container has access to the container runtime - fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n" - } - ] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. 
If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", - "test": "If imagePullPolicy = always pass, else fail.", - "controlID": "C-0075", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "image-pull-policy-is-not-set-to-always", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "check imagePullPolicy field, if imagePullPolicy = always pass, else fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" - } - ] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "test": "Test will check if a certain set of labels is defined, this is a configurable control. 
Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "label-usage-for-resources", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.recommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.recommendedLabels", - "name": "Recommended Labels", - "description": "Kubescape checks that workloads have at least one label that identifies semantic attributes." - } - ], - "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for 
WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" - } - ] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "k8s-common-labels-usage", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.k8sRecommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.k8sRecommendedLabels", - "name": "Kubernetes Recommended Labels", - "description": "Kubescape checks that workloads have at least one of this list of configurable labels, as recommended in the Kubernetes documentation." - } - ], - "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his 
Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" - } - ] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. 
User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useUntilKubescapeVersion": "v2.3.8" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." - } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - }, - { - "name": "container-image-repository-v1", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useFromKubescapeVersion": "v2.9.0" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" - } - ] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-0185", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "LinuxKernelVariables" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n\n parsed_kernel_version_arr := parse_kernel_version_to_array(node.status.nodeInfo.kernelVersion)\n is_azure := parsed_kernel_version_arr[4] == \"azure\"\n\n is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure)\n\n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n \"reviewPaths\": [\"kernelVersion\"],\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\n# General Kernel versions are between 5.1.1 and 5.16.2\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == false\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 16\n parsed_kernel_version_arr[2] < 2\n}\n\n# Azure kernel version with is 5.4.0-1067-azure\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == true\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 4\n parsed_kernel_version_arr[2] == 0\n parsed_kernel_version_arr[3] < 1067\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}\n\nparse_kernel_version_to_array(kernel_version_str) = output {\n\tversion_triplet := regex.find_n(`(\\d+\\.\\d+\\.\\d+)`, kernel_version_str,-1)\n version_triplet_array := split(version_triplet[0],\".\")\n\n build_vendor := regex.find_n(`-(\\d+)-(\\w+)`, kernel_version_str,-1)\n build_vendor_array := split(build_vendor[0],\"-\")\n\n output := [to_number(version_triplet_array[0]),to_number(version_triplet_array[1]),to_number(version_triplet_array[2]),to_number(build_vendor_array[1]),build_vendor_array[2]]\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] 
{\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9, v2.2.4 or v2.3.0)", - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", - "test": "Checking the ArgoCD image version of Deployment objects, if it is below one of the fixed versions (v2.1.9, v2.2.4, v2.3.0) it fires an alert", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-24348", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) 
{\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", - "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-23648", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 
5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" - } - ] - }, - { - "name": "RBAC enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rbac-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" - }, - { - "name": "rbac-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CVE-2022-39328-grafana-auth-bypass", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-39328 is a critical 
vulnerability in Grafana, it might enable attackers to access unauthorized endpoints under heavy load.", - "remediation": "Update your Grafana to 9.2.4 or above", - "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. The CVSS score for this vulnerability is 9.8 Critical.", - "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", - "controlID": "C-0090", - "baseScore": 9.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-39328", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno; it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Kyverno to 1.8.5 or above", - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image 
repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process could be bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-47633", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous users. 
Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "anonymous-access-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", - "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n" - } - ] - }, - { - "controlID": "C-0265", - "name": "Authenticated user has sensitive permissions", - "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
This control ensures that system:authenticated users do not have cluster risking permissions.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "system-authenticated-allowed-to-take-over-cluster", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", - "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is gourp\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}" - } - ] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for 
which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-cpu-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU limits are not set.", - "remediation": "Ensure CPU limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - 
"security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory limits are not set.", - "remediation": "Ensure memory limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" - } - ] - } - ], - 
"ControlsIDs": [ - "C-0002", - "C-0005", - "C-0007", - "C-0012", - "C-0013", - "C-0014", - "C-0015", - "C-0016", - "C-0017", - "C-0018", - "C-0020", - "C-0021", - "C-0026", - "C-0030", - "C-0031", - "C-0034", - "C-0035", - "C-0036", - "C-0038", - "C-0039", - "C-0041", - "C-0042", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0049", - "C-0052", - "C-0053", - "C-0054", - "C-0055", - "C-0056", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0078", - "C-0079", - "C-0081", - "C-0087", - "C-0088", - "C-0090", - "C-0091", - "C-0262", - "C-0265", - "C-0270", - "C-0271" - ] -} \ No newline at end of file diff --git a/releaseDev/armobest.json b/releaseDev/armobest.json deleted file mode 100644 index 79de5e4f4..000000000 --- a/releaseDev/armobest.json +++ /dev/null @@ -1,3066 +0,0 @@ -{ - "name": "ArmoBest", - "description": "", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to 
gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "insecure-port-flag", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. 
This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - } - ] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = 
[]\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", 
[container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - } - ] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
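The "Ingress and Egress blocked" control above recommends a default policy that selects all Pods and denies all ingress and egress traffic, with more permissive policies layered on top for required connections. A minimal sketch of such a NetworkPolicy follows; the policy and namespace names are placeholders.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all     # hypothetical name
  namespace: my-namespace    # placeholder namespace
spec:
  podSelector: {}            # empty selector: applies to every pod in the namespace
  policyTypes:
    - Ingress
    - Egress
  # no ingress or egress rules are listed, so all traffic is denied until
  # additional policies explicitly allow the connections each workload needs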
", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ingress-and-egress-blocked", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, 
networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" - } - ] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. 
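As an illustrative sketch of the C-0030 remediation (namespace, policy name and labels are placeholders), a NetworkPolicy that selects the pod and declares both Ingress and Egress policyTypes is what the `is_ingerss_egress_policy` check above looks for:

# Placeholder example: adjust namespace, labels and rules to your workload.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: my-app-ingress-egress
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: my-app            # placeholder pod label
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: frontend     # placeholder
  egress:
  - to:
    - podSelector:
        matchLabels:
          role: backend      # placeholder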
Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, 
metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := 
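A minimal sketch of the C-0034 remediation, assuming a service account named build-robot (placeholder): disable token automount at the service-account level and, only where a pod genuinely needs the token, opt back in at the pod level, which takes precedence:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: build-robot          # placeholder name
automountServiceAccountToken: false
---
apiVersion: v1
kind: Pod
metadata:
  name: needs-token          # placeholder name
spec:
  serviceAccountName: build-robot
  automountServiceAccountToken: true   # pod-level setting takes precedence over the SA
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image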
array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": 
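To illustrate the C-0038 remediation (names are placeholders), the pod spec should simply omit hostPID and hostIPC, or set them to false explicitly:

apiVersion: v1
kind: Pod
metadata:
  name: isolated-pod         # placeholder
spec:
  hostPID: false             # default; may also be omitted entirely
  hostIPC: false             # default; may also be omitted entirely
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder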
[path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := 
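In the same spirit as the hostPID/hostIPC sketch above, the C-0041 fix is to leave hostNetwork unset or set it to false (names are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: pod-on-pod-network   # placeholder
spec:
  hostNetwork: false         # default; may also be omitted entirely
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder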
wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. 
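As a sketch of the C-0044 remediation (service name and selector are placeholders), expose the container through a NodePort or ClusterIP Service instead of a hostPort on the container itself:

apiVersion: v1
kind: Service
metadata:
  name: my-app               # placeholder
spec:
  type: NodePort             # or ClusterIP for in-cluster access only
  selector:
    app: my-app              # placeholder label
  ports:
  - port: 8080               # service port
    targetPort: 8080         # containerPort on the pod, with no hostPort defined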
You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." - } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "Network mapping", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. 
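To illustrate the C-0046 remediation against the configurable insecureCapabilities list (pod name and image are placeholders), drop all capabilities and add back only what the workload strictly needs:

apiVersion: v1
kind: Pod
metadata:
  name: least-capability-pod   # placeholder
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder
    securityContext:
      capabilities:
        drop: ["ALL"]
        add: ["NET_BIND_SERVICE"]   # only if the workload actually needs it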
Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
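As an illustrative baseline for C-0049 and C-0054 (namespace name is a placeholder), a default-deny policy in each namespace ensures the `internal-networking` rule finds at least one NetworkPolicy and restricts lateral movement:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-namespace    # placeholder; create one per namespace
spec:
  podSelector: {}            # selects every pod in the namespace
  policyTypes:
  - Ingress
  - Egress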
If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "linux-hardening", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = 
containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": 
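A minimal sketch for C-0055 (names are placeholders): the `linux-hardening` rule passes when at least one of seccomp, SELinux, AppArmor or dropped capabilities is defined for the pod or container, for example:

apiVersion: v1
kind: Pod
metadata:
  name: hardened-pod         # placeholder
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault   # pod-level seccomp profile
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder
    securityContext:
      capabilities:
        drop: ["ALL"]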
path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - } - ] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - 
"controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - } - ] - }, - { - "name": "Pods in default namespace", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", - "remediation": "Create necessary namespaces and move all the pods from default namespace there.", - "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the pods running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "sudo-in-container-entrypoint", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, start_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. 
Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-portforward-v1", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "No impersonation", - "attributes": { - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, 
k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == 
\"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Audit logs enabled", - 
"attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot 
all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading 
the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useUntilKubescapeVersion": "v2.3.8" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - }, - { - "name": "container-image-repository-v1", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useFromKubescapeVersion": "v2.9.0" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", 
- "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." - } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" - } - ] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-0185", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "LinuxKernelVariables" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n\n parsed_kernel_version_arr := parse_kernel_version_to_array(node.status.nodeInfo.kernelVersion)\n is_azure := parsed_kernel_version_arr[4] == \"azure\"\n\n is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure)\n\n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n \"reviewPaths\": [\"kernelVersion\"],\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\n# General Kernel versions are between 5.1.1 and 5.16.2\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == false\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 16\n parsed_kernel_version_arr[2] < 2\n}\n\n# Azure kernel version with is 5.4.0-1067-azure\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == true\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 4\n parsed_kernel_version_arr[2] == 0\n parsed_kernel_version_arr[3] < 1067\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}\n\nparse_kernel_version_to_array(kernel_version_str) = output {\n\tversion_triplet := regex.find_n(`(\\d+\\.\\d+\\.\\d+)`, kernel_version_str,-1)\n version_triplet_array := split(version_triplet[0],\".\")\n\n build_vendor := regex.find_n(`-(\\d+)-(\\w+)`, kernel_version_str,-1)\n build_vendor_array := split(build_vendor[0],\"-\")\n\n output := [to_number(version_triplet_array[0]),to_number(version_triplet_array[1]),to_number(version_triplet_array[2]),to_number(build_vendor_array[1]),build_vendor_array[2]]\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] 
{\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-24348", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) 
{\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", - "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-23648", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 
5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" - } - ] - }, - { - "name": "CVE-2022-3172-aggregated-API-server-redirect", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [] - }, - "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", - "controlID": "C-0089", - "baseScore": 3.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-3172", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apiregistration.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "APIService" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "apiserverinfo.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", - "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n" - } - ] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of 
policies using a malicious image repository or MITM proxy", - "remediation": "Update your Kyverno to version 1.8.5 or above", - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-47633", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "controlID": "C-0236", - "name": "Verify image signature", - "description": "Verifies the signature of each image with given public keys", - "long_description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "manual_test": "", - "references": [], - "attributes": { - "actionRequired": "configuration" - }, - 
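Control C-0236 expects every workload image to verify against one of the configured trusted Cosign public keys, and the related C-0237 only checks that some signature exists. A minimal sketch of producing a compliant image with the Cosign CLI; the image reference and key file names below are placeholders, not values taken from these controls:

```
# Placeholder image reference; replace with an image you control.
IMAGE=registry.example.com/myapp:1.0.0

# Generate a Cosign key pair (cosign.key / cosign.pub).
cosign generate-key-pair

# Sign the image and push the signature to the registry.
cosign sign --key cosign.key "$IMAGE"

# Verify the signature with the public key -- the same kind of check the
# verify-image-signature rule performs against its trusted keys.
cosign verify --key cosign.pub "$IMAGE"
```
The public key written to cosign.pub is the sort of value that would be supplied through `settings.postureControlInputs.trustedCosignPublicKeys`.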
"baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "verify-image-signature", - "attributes": { - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "ruleQuery": "armo_builtins", - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.trustedCosignPublicKeys", - "name": "Trusted Cosign public keys", - "description": "A list of trusted Cosign public keys that are used for validating container image signatures." - } - ], - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.containers[%v].image\", [i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0237", - "name": "Check 
if signature exists", - "description": "Ensures that all images contain some signature", - "long_description": "Verifies that each image is signed", - "remediation": "Replace the image with a signed image", - "manual_test": "", - "references": [], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "has-image-signature", - "attributes": { - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensures that all images contain some signature", - "remediation": "Replace the image with a signed image", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - } - ] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary 
notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-cpu-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU limits are not set.", - "remediation": "Ensure CPU limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } 
- ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory limits are not set.", - "remediation": "Ensure memory limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0005", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0030", - "C-0034", - "C-0035", - 
"C-0038", - "C-0041", - "C-0044", - "C-0046", - "C-0049", - "C-0054", - "C-0055", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0078", - "C-0079", - "C-0081", - "C-0087", - "C-0089", - "C-0091", - "C-0236", - "C-0237", - "C-0270", - "C-0271" - ] -} \ No newline at end of file diff --git a/releaseDev/attack_tracks.json b/releaseDev/attack_tracks.json deleted file mode 100644 index f2679f12e..000000000 --- a/releaseDev/attack_tracks.json +++ /dev/null @@ -1,109 +0,0 @@ -[ - { - "apiVersion": "regolibrary.kubescape/v1alpha1", - "kind": "AttackTrack", - "metadata": { - "name": "external-workload-with-cluster-takeover-roles" - }, - "spec": { - "version": null, - "data": { - "name": "Initial Access", - "description": "An attacker can access the Kubernetes environment.", - "subSteps": [ - { - "name": "Cluster Access", - "description": "An attacker has access to sensitive information and can leverage them by creating pods in the cluster." - } - ] - } - } - }, - { - "apiVersion": "regolibrary.kubescape/v1alpha1", - "kind": "AttackTrack", - "metadata": { - "name": "external-database-without-authentication" - }, - "spec": { - "version": null, - "data": { - "name": "Initial Access", - "description": "An attacker can access the Kubernetes environment.", - "subSteps": [ - { - "name": "Unauthenticated Access", - "description": "An unauthenticated attacker can access resources." - } - ] - } - } - }, - { - "apiVersion": "regolibrary.kubescape/v1alpha1", - "kind": "AttackTrack", - "metadata": { - "name": "service-destruction" - }, - "spec": { - "version": null, - "data": { - "name": "Initial Access", - "description": "An attacker can access the Kubernetes environment.", - "subSteps": [ - { - "name": "Denial of service", - "description": "An attacker can overload the workload, making it unavailable." - } - ] - } - } - }, - { - "apiVersion": "regolibrary.kubescape/v1alpha1", - "kind": "AttackTrack", - "metadata": { - "name": "workload-external-track" - }, - "spec": { - "version": null, - "data": { - "name": "Initial Access", - "description": "An attacker can access the Kubernetes environment.", - "subSteps": [ - { - "name": "Execution (Vulnerable Image)", - "description": "An attacker can execute malicious code by exploiting vulnerable images.", - "checksVulnerabilities": true, - "subSteps": [ - { - "name": "Data Collection", - "description": "An attacker can gather data." - }, - { - "name": "Secret Access", - "description": "An attacker can steal secrets." - }, - { - "name": "Credential access", - "description": "An attacker can steal account names and passwords." - }, - { - "name": "Privilege Escalation (Node)", - "description": "An attacker can gain permissions and access node resources." - }, - { - "name": "Persistence", - "description": "An attacker can create a foothold." - }, - { - "name": "Lateral Movement (Network)", - "description": "An attacker can move through the network." 
- } - ] - } - ] - } - } - } -] \ No newline at end of file diff --git a/releaseDev/cis-aks-t1.2.0.json b/releaseDev/cis-aks-t1.2.0.json deleted file mode 100644 index 8214ae81c..000000000 --- a/releaseDev/cis-aks-t1.2.0.json +++ /dev/null @@ -1,4282 +0,0 @@ -{ - "name": "cis-aks-t1.2.0", - "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", - "attributes": { - "armoBuiltin": true, - "version": "v1.2.0" - }, - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "2": { - "name": "Master (Control Plane) Configuration", - "id": "2", - "subSections": { - "1": { - "name": "Logging", - "id": "2.1", - "controlsIDs": [ - "C-0254" - ] - } - } - }, - "3": { - "name": "Worker Nodes", - "id": "3", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "3.1", - "controlsIDs": [ - "C-0167", - "C-0171", - "C-0235", - "C-0238" - ] - }, - "2": { - "name": "Kubelet", - "id": "3.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0182", - "C-0183" - ] - } - } - }, - "4": { - "name": "Policies", - "id": "4", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "4.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190" - ] - }, - "2": { - "name": "Pod Security Standards", - "id": "4.2", - "controlsIDs": [ - "C-0201", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219" - ] - }, - "3": { - "name": "Azure Policy / OPA", - "id": "4.3", - "controlsIDs": [] - }, - "4": { - "name": "CNI Plugin", - "id": "4.4", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "5": { - "name": "Secrets Management", - "id": "4.5", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "6": { - "name": "Extensible Admission Control", - "id": "4.6", - "controlsIDs": [] - }, - "7": { - "name": "General Policies", - "id": "4.7", - "controlsIDs": [ - "C-0209", - "C-0211", - "C-0212" - ] - } - } - }, - "5": { - "name": "Managed services", - "id": "5", - "subSections": { - "1": { - "name": "Image Registry and Image Scanning", - "id": "5.1", - "controlsIDs": [ - "C-0078", - "C-0243", - "C-0250", - "C-0251" - ] - }, - "2": { - "name": "Access and identity options for Azure Kubernetes Service (AKS)", - "id": "5.2", - "controlsIDs": [ - "C-0239", - "C-0241" - ] - }, - "3": { - "name": "Key Management Service (KMS)", - "id": "5.3", - "controlsIDs": [ - "C-0244" - ] - }, - "4": { - "name": "Cluster Networking", - "id": "5.4", - "controlsIDs": [ - "C-0240", - "C-0245", - "C-0247", - "C-0248", - "C-0252" - ] - }, - "5": { - "name": "Authentication and Authorization", - "id": "5.5", - "controlsIDs": [ - "C-0088" - ] - }, - "6": { - "name": "Other Cluster Configurations", - "id": "5.6", - "controlsIDs": [ - "C-0242", - "C-0249" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "name": "CIS-5.1.4 Minimize Container Registries to only those approved", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Use approved container registries.", - "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also 
recommended:\n", - "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useUntilKubescapeVersion": "v2.3.8" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." - } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": 
\"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - }, - { - "name": "container-image-repository-v1", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useFromKubescapeVersion": "v2.9.0" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" - } - ], - "references": [ - "\n\n \n\n " - ], - "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry." - }, - { - "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. 
You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rbac-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" - }, - { - "name": "rbac-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" - } - ], - "references": [ - "\n\n " - ] - }, - { - "controlID": "C-0167", - "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", - "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kubelet` controls various parameters for 
the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0171", - "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. 
Verify that the ownership is set to `root:root`", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0172", - "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": { \"anonymous\": { \"enabled\": false }` argument is set to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"anonymous\":{\"enabled\":false}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": 
obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0173", - "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\"... \"webhook\":{\"enabled\":true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"authentication\": \"webhook\": \"enabled\"` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more 
/etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `\"authentication\": {\"webhook\": {\"enabled\": true}}` is set.\n\n If the `\"authorization\": {\"mode\":` setting is present, check that it is not set to `AlwaysAllow`. If it is not present, check that there is a Kubelet config file specified by `--config`, and that file sets `\"authorization\": {\"mode\":` to something other than `AlwaysAllow`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"webhook\":{\"enabled\":true}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Do not allow all requests. 
Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0174", - "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
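For reference, the logic of the `kubelet-authorization-mode-alwaysAllow` rule above can be reduced to a small standalone sketch. The package name, sample command line and sample config below are illustrative assumptions only (the shipped rule reads live `KubeletInfo` data from the host sensor and base64-decodes the config file); the sketch uses the same pre-`rego.v1` syntax as the rules in this framework and can be evaluated with `opa eval`.

```rego
package example_c0173

# Hypothetical kubelet command line and decoded kubelet-config.json content.
sample_cmdline := "/usr/bin/kubelet --authorization-mode=AlwaysAllow --config /etc/kubernetes/kubelet/kubelet-config.json"

sample_config := {"authorization": {"mode": "AlwaysAllow"}}

# CLI branch: the flag itself requests AlwaysAllow.
authorization_always_allow {
	contains(sample_cmdline, "--authorization-mode=AlwaysAllow")
}

# Config-file branch: no flag on the command line, but the config sets AlwaysAllow.
authorization_always_allow {
	not contains(sample_cmdline, "--authorization-mode")
	sample_config.authorization.mode == "AlwaysAllow"
}
```

Changing the sample mode to `Webhook` and dropping the flag makes both rule bodies fail, which is the passing state for this control.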
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"x509\": {\"clientCAFile:\"` set to the location of the client certificate authority file.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"x509\": {\"clientCAFile:\"` argument exists and is set to the location of the client certificate authority file.\n\n If the `\"x509\": {\"clientCAFile:\"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `\"authentication\": { \"x509\": {\"clientCAFile:\"` to the location of the client certificate authority file.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication.. 
x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot 
obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0175", - "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to `0`\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `readOnlyPort` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
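The client-CA check of the `enforce-kubelet-client-tls-authentication-updated` rule above follows the same pattern. Here is a minimal sketch, assuming a hypothetical decoded config with no `clientCAFile`; the names and sample values are assumptions, not part of the shipped rule:

```rego
package example_c0174

# Hypothetical decoded kubelet-config.json with no client CA bundle configured.
sample_config := {"authentication": {"x509": {}}}

# Mirrors the config-file branch of the rule above: fail when
# authentication.x509.clientCAFile is absent.
client_ca_file_missing {
	not sample_config.authentication.x509.clientCAFile
}
```

Adding `"clientCAFile": "/etc/kubernetes/pki/ca.crt"` to the sample makes the body fail, i.e. the control passes.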
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "read-only-port-enabled-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0176", - "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - 
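Before moving on, the read-only-port logic of the `read-only-port-enabled-updated` rule above can be sketched the same way. Everything below is a hypothetical, self-contained example (the sample port value and names are assumptions), not the rule as shipped:

```rego
package example_c0175

# Hypothetical decoded kubelet-config.json that still exposes the read-only port.
sample_config := {"readOnlyPort": 10255}

# Mirrors the config-file branch of the rule above: fail when readOnlyPort is
# present and not explicitly 0.
read_only_port_enabled {
	sample_config.readOnlyPort
	not sample_config.readOnlyPort == 0
}
```

Setting `"readOnlyPort": 0` (or omitting the field where the platform default already disables the port) makes the body fail and the control pass.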
"long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": 
obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0177", - "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file 
sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-protect-kernel-defaults", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the --protect-kernel-defaults argument is set to true.", - "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, 
\"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0178", - "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. 
You might have iptables rules too restrictive or too open.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"makeIPTablesUtilChains\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. 
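The `kubelet-ip-tables` rule that follows applies the same config-file pattern to `makeIPTablesUtilChains`; a minimal standalone sketch with hypothetical sample data:

```rego
package example_c0178

# Hypothetical decoded kubelet-config.json that disables iptables management.
sample_config := {"makeIPTablesUtilChains": false}

# Mirrors the config-file branch of the kubelet-ip-tables rule below: fail
# unless makeIPTablesUtilChains is explicitly true.
iptables_util_chains_not_managed {
	not sample_config.makeIPTablesUtilChains == true
}
```

As in the protect-kernel-defaults sketch, both `false` and an absent field are treated as failures here.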
If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-ip-tables", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0179", - "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames 
could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", - "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-hostname-override", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0180", - "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
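The `kubelet-event-qps` rule shown below only flags the unlimited setting; a minimal sketch with a hypothetical decoded config (names and values are assumptions):

```rego
package example_c0180

# Hypothetical decoded kubelet-config.json with an unlimited event record rate.
sample_config := {"eventRecordQPS": 0}

# Mirrors the config-file branch of the kubelet-event-qps rule below: flag the
# configuration only when eventRecordQPS is exactly 0.
event_record_qps_unlimited {
	sample_config.eventRecordQPS == 0
}
```

Any positive value, or an absent field, leaves the rule undefined, so the check passes.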
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "See the AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-event-qps", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": 
{\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0182", - "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", - "references": [ - "\n\n \n\n \n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-certificates", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --rotate-certificates argument is not set to false.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"rotateCertificates\"],\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0183", - "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching 
for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-kubelet-server-certificate", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n" - } - ] - }, - { - "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. 
Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "cluster-admin-role", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; 
apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - 
"useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespace in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-create-pod", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in 
api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-default-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - 
"match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - }, - { - "name": "namespace-without-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not 
have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := 
get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path 
is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "controlID": "C-0201", - "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-restricted-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0205", - "name": "CIS-4.4.1 Ensure latest CNI version is used", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", - "manual_test": "Ensure CNI plugin supports network policies.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None.", - "default_value": "This will depend on the CNI plugin in use.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [], - "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" - } - ] - }, - { - "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "default_value": "By default, network policies are not created.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-secrets-in-env-var", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "CIS-4.5.2 Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 5, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "external-secret-storage", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" - } - ] - }, - { - "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Azure AKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "\n\n \n\n \n\n ." 
- ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "default_value": "When you create an AKS cluster, the following namespaces are available:\n\n NAMESPACES\nNamespace Description\ndefault Where pods and deployments are created by default when none is provided. In smaller environments, you can deploy applications directly into the default namespace without creating additional logical separations. When you interact with the Kubernetes API, such as with kubectl get pods, the default namespace is used when none is specified.\nkube-system Where core resources exist, such as network features like DNS and proxy, or the Kubernetes dashboard. You typically don't deploy your own applications into this namespace.\nkube-public Typically not used, but can be used for resources to be visible across the whole cluster, and can be viewed by any user.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-namespaces", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - } - ], - "ruleDependencies": [], - "description": "lists all namespaces for users to review", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. 
For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - 
"remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "immutable-container-filesystem", - "attributes": {}, - 
"ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - }, - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 
0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), 
\"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, 
\"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-seccomp-profile", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, 
path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-procmount-default", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if 
container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of 
\"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" - }, - { - "name": "set-fsgroup-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - 
"apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "set-sysctls-params", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - 
} - ], - "ruleDependencies": [], - "description": "Fails if securityContext.sysctls is not set.", - "remediation": "Set securityContext.sysctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext 
has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" - }, - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = 
sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "CIS-4.7.3 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get all -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "rolebinding-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - 
"rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "role-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "configmap-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "endpoints-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Endpoints" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "persistentvolumeclaim-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PersistentVolumeClaim" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "podtemplate-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - 
], - "resources": [ - "PodTemplate" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "replicationcontroller-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ReplicationController" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "service-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "serviceaccount-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "endpointslice-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "horizontalpodautoscaler-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - 
"v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "lease-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "csistoragecapacity-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ingress-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "poddisruptionbudget-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-secret-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - 
"apiVersions": [ - "v1" - ], - "resources": [ - "Secret" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "controlID": "C-0213", - "name": "CIS-4.2.1 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n as an alternative AZ CLI can be used:\n\n \n```\naz aks list --output yaml\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an AKS cluster, the value of \"enablePodSecurityPolicy\" is null.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "psp-deny-privileged-container", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one 
PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0214", - "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostpid", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0215", - "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC 
namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostipc", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0216", - "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following 
command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostnetwork", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0217", - "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-allowprivilegeescalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0218", - "name": "CIS-4.2.6 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-root-container", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" - } - ] - }, - { - "controlID": "C-0219", - "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-allowed-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0235", - "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0238", - "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n \n```\nchmod 644 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0239", - "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. 
Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. Webhook token authentication is configured and managed as part of the AKS cluster.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-default-service-accounts-has-only-default-roles", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"deletePaths\": [sprintf(\"subjects[%d]\", [i])],\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0240", - "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", - "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", - "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-cni-enabled-aks", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n" - } - ] - }, - { - "controlID": "C-0241", - "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", - "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", - "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. 
The scope can be an individual resource, a resource group, or across the subscription.", - "remediation": "Set Azure RBAC as access system.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-azure-rbac-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", - "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled check if Azure RBAC is enabled into ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n" - } - ] - }, - { - "controlID": "C-0242", - "name": "CIS-5.6.2 Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-hostile-multitenant-workloads", - "attributes": { - "actionRequired": "manual review" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "ruleDependencies": [], - "configInputs": [], - "controlConfigInputs": [], - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. 
The security domain for Kubernetes becomes the entire cluster, not an individual node.", - "remediation": "Use physically isolated clusters", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" - } - ] - }, - { - "controlID": "C-0243", - "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. Or, a network issue might prevent you from connecting to the registry.", - "default_value": "Images are not scanned by Default.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "remediation": "Enable Azure Defender image scanning. 
Command: az aks update --enable-defender --resource-group --name ", - "ruleQuery": "armo_builtin", - "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n" - } - ] - }, - { - "controlID": "C-0244", - "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", - "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n 
cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - } - ] - }, - { - "controlID": "C-0245", - "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", - "attributes": { - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := 
input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"spec.tls\"],\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n" - } - ] - }, - { - 
"controlID": "C-0247", - "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. 
Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", - "default_value": "By default, Endpoint Private Access is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "restrict-access-to-the-control-plane-endpoint", - "attributes": { - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n" - } - ] - }, - { - "controlID": "C-0248", - "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-clusters-are-created-with-private-nodes", - "attributes": { - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. 
Private Nodes are nodes with no public IP addresses.", - "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n" - } - ] - }, - { - "controlID": "C-0249", - "name": "CIS-5.6.1 Restrict untrusted workloads", - "description": "Restricting unstrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", - "long_description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment. 
Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "actionRequired": "manual review" - }, - "baseScore": 5, - "impact_statement": "", - "default_value": "ACI is not a default component of the AKS", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-manual", - "attributes": { - "actionRequired": "manual review", - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", - "remediation": "", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" - } - ] - }, - { - "controlID": "C-0250", - "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", - "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-service-principle-has-read-only-permissions", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - }, - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not 
read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0251", - "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", - "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-role-definitions-in-acr", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0252", - "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's wirtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. 
Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", - "attributes": { - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "ruleQuery": "armo_builtins", - "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n" - } - ] - }, - { - "controlID": "C-0254", - "name": "CIS-2.1.1 Enable audit Logs", - "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", - "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", - "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. 
Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", - "default_value": "By default, cluster control plane logs aren't sent to be Logged.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-manual", - "attributes": { - "actionRequired": "manual review", - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", - "remediation": "", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" - } - ] - } - ], - "ControlsIDs": [ - "C-0078", - "C-0088", - "C-0167", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0182", - "C-0183", - "C-0185", - "C-0186", - "C-0187", - 
"C-0188", - "C-0189", - "C-0190", - "C-0201", - "C-0205", - "C-0206", - "C-0207", - "C-0208", - "C-0209", - "C-0211", - "C-0212", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0235", - "C-0238", - "C-0239", - "C-0240", - "C-0241", - "C-0242", - "C-0243", - "C-0244", - "C-0245", - "C-0247", - "C-0248", - "C-0249", - "C-0250", - "C-0251", - "C-0252", - "C-0254" - ] -} \ No newline at end of file diff --git a/releaseDev/cis-eks-t1.2.0.json b/releaseDev/cis-eks-t1.2.0.json deleted file mode 100644 index 0e00ccc52..000000000 --- a/releaseDev/cis-eks-t1.2.0.json +++ /dev/null @@ -1,4456 +0,0 @@ -{ - "name": "cis-eks-t1.2.0", - "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", - "attributes": { - "armoBuiltin": true, - "version": "v1.2.0" - }, - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "2": { - "name": "Control Plane Configuration", - "id": "2", - "subSections": { - "1": { - "name": "Logging", - "id": "2.1", - "controlsIDs": [ - "C-0067" - ] - } - } - }, - "3": { - "name": "Worker Nodes", - "id": "3", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "3.1", - "controlsIDs": [ - "C-0167", - "C-0171", - "C-0235", - "C-0238" - ] - }, - "2": { - "name": "Kubelet", - "id": "3.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0183" - ] - }, - "3": { - "name": "Container Optimized OS", - "id": "3.3", - "controlsIDs": [ - "C-0226" - ] - } - } - }, - "4": { - "name": "Policies", - "id": "4", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "4.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0246" - ] - }, - "2": { - "name": "Pod Security Policies", - "id": "4.2", - "controlsIDs": [ - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0220" - ] - }, - "3": { - "name": "CNI Plugin", - "id": "4.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "name": "Secrets Management", - "id": "4.4", - "controlsIDs": [ - "C-0207", - "C-0234" - ] - }, - "6": { - "name": "General Policies", - "id": "4.6", - "controlsIDs": [ - "C-0209", - "C-0211", - "C-0212" - ] - } - } - }, - "5": { - "name": "Managed services", - "id": "5", - "subSections": { - "1": { - "name": "Image Registry and Image Scanning", - "id": "5.1", - "controlsIDs": [ - "C-0078", - "C-0221", - "C-0222", - "C-0223" - ] - }, - "2": { - "name": "Identity and Access Management (IAM)", - "id": "5.2", - "controlsIDs": [ - "C-0225" - ] - }, - "3": { - "name": "AWS EKS Key Management Service", - "id": "5.3", - "controlsIDs": [ - "C-0066" - ] - }, - "4": { - "name": "Cluster Networking", - "id": "5.4", - "controlsIDs": [ - "C-0227", - "C-0228", - "C-0229", - "C-0230", - "C-0231" - ] - }, - "5": { - "name": "Authentication and Authorization", - "id": "5.5", - "controlsIDs": [ - "C-0232" - ] - }, - "6": { - "name": "Other Cluster Configurations", - "id": "5.6", - "controlsIDs": [ - "C-0233", - "C-0242" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Encrypt Kubernetes 
secrets, stored in etcd, using secrets encryption feature during Amazon EKS cluster creation.", - "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", - "long_description": "Kubernetes can store secrets that pods can access via a mounted volume. Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). The only requirement is to enable the encryption provider support during EKS cluster creation.\n\n Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.\n\n Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd.\n\n Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. This protects against attackers in the event that they manage to gain access to etcd.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == 
\"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ], - "manual_test": "Using the etcdctl commandline, 
read that secret out of etcd:\n\n \n```\netcdCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C\n\n```\n where [...] must be the additional arguments for connecting to the etcd server.\n\n Verify the stored secret is prefixed with k8s:enc:aescbc:v1: which indicates the aescbc provider has encrypted the resulting data.", - "references": [ - "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/" - ], - "impact_statement": "", - "default_value": "By default secrets created using the Kubernetes API are stored in *tmpfs* and are encrypted at rest." - }, - { - "name": "CIS-2.1.1 Enable audit Logs", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", - "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", - "long_description": "Audit logs enable visibility into all API server requests from authentic and anonymous sources. 
Stored log data can be analyzed manually or with tools to identify and understand anomalous or negative activity and lead to intelligent remediations.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-5.1.4 Minimize Container Registries to only those approved", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Use approved container registries.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useUntilKubescapeVersion": "v2.3.8" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - }, - { - "name": "container-image-repository-v1", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useFromKubescapeVersion": "v2.9.0" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", 
- "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." - } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" - } - ], - "references": [ - "https://aws.amazon.com/blogs/opensource/using-open-policy-agent-on-amazon-eks/" - ], - "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry.", - "default_value": "" - }, - { - "controlID": "C-0167", - "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", - "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AWS EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0171", - "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. 
Verify that the ownership is set to `root:root`", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the AWS EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0172", - "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the[Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Anonymous Authentication is not enabled. 
This may be configured as a command line argument to the kubelet service with `--anonymous-auth=false` or in the kubelet configuration file via `\"authentication\": { \"anonymous\": { \"enabled\": false }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with `kubectl` on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Anonymous Authentication is not enabled checking that `\"authentication\": { \"anonymous\": { \"enabled\": false }` is in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0173", - "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets can be configured to allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Webhook Authentication is enabled. This may be enabled as a command line argument to the kubelet service with `--authentication-token-webhook` or in the kubelet configuration file via `\"authentication\": { \"webhook\": { \"enabled\": true } }`.\n\n Verify that the Authorization Mode is set to `WebHook`. 
This may be set as a command line argument to the kubelet service with `--authorization-mode=Webhook` or in the configuration file via `\"authorization\": { \"mode\": \"Webhook }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Webhook Authentication is enabled with `\"authentication\": { \"webhook\": { \"enabled\": true } }` in the API response.\n\n Verify that the Authorization Mode is set to `WebHook` with `\"authorization\": { \"mode\": \"Webhook }` in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Do not allow all requests. 
Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0174", - "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that a client certificate authority file is configured. This may be configured using a command line argument to the kubelet service with `--client-ca-file` or in the kubelet configuration file via `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that a client certificate authority file is configured with `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"` in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": 
{\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0175", - "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "read-only-port-enabled-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0176", - "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable 
timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/pull/18552" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0177", - "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not 
present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/" - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-protect-kernel-defaults", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the --protect-kernel-defaults argument is set to true.", - "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": 
decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0178", - "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. 
You might have iptables rules too restrictive or too open.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains.: true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the above command includes the argument `--make-iptables-util-chains` then verify it is set to true.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"makeIPTablesUtilChains.:true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. 
If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-ip-tables", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0179", - "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames 
could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", - "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/issues/22063", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-hostname-override", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0180", - "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-event-qps", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": 
[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0181", - "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not present or is set to true", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", - "references": [ - "https://github.com/kubernetes/kubernetes/pull/41912", - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration", - "https://kubernetes.io/docs/imported/release/notes/", - "https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "validate-kubelet-tls-configuration-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", - "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := 
input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t# get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0183", - "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-kubelet-server-certificate` executable argument verify that it is set to true.\n\n If the process does not have the `--rotate-kubelet-server-certificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists in the `featureGates` section and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://github.com/kubernetes/kubernetes/pull/45059", - "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-kubelet-server-certificate", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n" - } - ] - }, - { - "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. 
Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "cluster-admin-role", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := 
array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := 
rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount 
kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-create-pod", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be 
created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - "https://aws.github.io/aws-eks-best-practices/iam/#disable-auto-mounting-of-service-account-tokens" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-default-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = 
\"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - }, - { - "name": "namespace-without-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account 
tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
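Illustrative sketch of the remediation shared by CIS-4.1.5 and CIS-4.1.6: disable automatic mounting of service account tokens both on the `default` service account and on workloads that do not need to reach the API server. The namespace, pod name, image and the `web-sa` service account are assumptions made for the example.

```
# Harden the default ServiceAccount so its token is not mounted automatically.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
  namespace: my-app              # illustrative namespace
automountServiceAccountToken: false
---
# A workload that does not talk to the API server opts out at the pod level too,
# using an explicit, non-default service account.
apiVersion: v1
kind: Pod
metadata:
  name: web                      # illustrative name
  namespace: my-app
spec:
  serviceAccountName: web-sa     # illustrative, explicitly created service account
  automountServiceAccountToken: false
  containers:
    - name: web
      image: nginx:1.25          # illustrative image
```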
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- 
\nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://www.impidio.com/blog/kubernetes-rbac-security-pitfalls", - "https://raesene.github.io/blog/2020/12/12/Escalating_Away/", - "https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. 
The system:masters group also has access to bind and impersonate.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-bind-escalate", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can or bind escalate roles/clusterroles", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", 
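Illustrative sketch for CIS-4.1.8: where read access to RBAC objects is genuinely needed (for example for auditing), the role can grant only `get`, `list` and `watch`, leaving `bind`, `escalate` and `impersonate` out of the verb list. The role name is an assumption for the example.

```
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: rbac-auditor            # illustrative name
rules:
  # Read-only access to RBAC objects; "bind", "escalate" and
  # "impersonate" are deliberately absent from the verbs.
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["roles", "rolebindings", "clusterroles", "clusterrolebindings"]
    verbs: ["get", "list", "watch"]
```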
\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "controlID": "C-0205", - "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports network policies.", - "references": [ - "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", - "https://aws.github.io/aws-eks-best-practices/network/" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None.", - "default_value": "This will depend on the CNI plugin in use.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [], - "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" - } - ] - }, - { - "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", - "https://octetz.com/posts/k8s-network-policy-apis", - "https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "default_value": "By default, network policies are not created.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
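Illustrative sketch for CIS-4.3.2: a default-deny NetworkPolicy per namespace, as suggested in the remediation, after which specific traffic is re-allowed with additional, more permissive policies. The policy name and namespace are assumptions for the example.

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all        # illustrative name
  namespace: my-app             # illustrative namespace
spec:
  podSelector: {}               # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress
```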
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-secrets-in-env-var", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 
10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "CIS-4.6.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Amazon EKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "default_value": "By default, Kubernetes starts with two initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-public` - The namespace for public-readable ConfigMap\n4. `kube-node-lease` - The namespace for associated lease object for each node", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-namespaces", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - } - ], - "ruleDependencies": [], - "description": "lists all namespaces for users to review", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.6.2 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. 
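Illustrative sketch for CIS-4.4.1: mounting a Secret as a read-only file volume instead of exposing it through `secretKeyRef` environment variables, so accidental environment logging does not leak the value. The pod name, image and Secret name are assumptions for the example.

```
apiVersion: v1
kind: Pod
metadata:
  name: api                          # illustrative name
spec:
  containers:
    - name: api
      image: example/api:1.0         # illustrative image
      volumeMounts:
        - name: db-credentials
          mountPath: /etc/secrets    # application reads the secret from files here
          readOnly: true
  volumes:
    - name: db-credentials
      secret:
        secretName: db-credentials   # illustrative Secret name
```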
When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": 
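Illustrative sketch for CIS-4.6.2, combining the settings that the rules below check for (non-root user and group, no privilege escalation, read-only root filesystem, NET_RAW dropped). The pod name and image are assumptions for the example.

```
apiVersion: v1
kind: Pod
metadata:
  name: hardened                  # illustrative name
spec:
  securityContext:                # pod-level defaults
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
  containers:
    - name: app
      image: example/app:1.0      # illustrative image
      securityContext:            # container-level settings
        allowPrivilegeEscalation: false
        privileged: false
        readOnlyRootFilesystem: true
        capabilities:
          drop: ["NET_RAW"]
```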
[],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - }, - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath 
{\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := 
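The value-resolution helpers of the non-root-containers rule above all follow the same container-then-pod fallback: prefer the container securityContext, fall back to the pod-level securityContext, and otherwise return a default flagged as not defined so the caller can suggest a fixPath. A condensed sketch of that pattern (helper and output names here are illustrative):

```rego
package example

# Prefer the container-level setting, fall back to the pod level, and finally
# to a default marked "defined": false so a fixPath can be suggested.
resolve_run_as_non_root(container, pod) = out {
	out := {"value": container.securityContext.runAsNonRoot, "defined": true}
} else = out {
	out := {"value": pod.spec.securityContext.runAsNonRoot, "defined": true}
} else = {"value": false, "defined": false}
```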
containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", 
[concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, 
path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-seccomp-profile", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := 
wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-procmount-default", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": 
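Both the seLinuxOptions and seccompProfile rules above detect a missing field with the same trick: object.get with an empty-string default returns "" whenever the nested path does not exist, so an empty result means the securityContext does not define the field. A minimal sketch (names illustrative):

```rego
package example

# object.get returns the supplied default ("") when the nested path is absent,
# so an empty result means seccompProfile is not defined at this level.
seccomp_profile_not_defined(spec) {
	object.get(spec, ["securityContext", "seccompProfile"], "") == ""
}
```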
sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" - }, - { - "name": "set-fsgroup-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := 
\"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "set-sysctls-params", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.sysctls is not set.", - "remediation": "Set securityContext.sysctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if 
securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" - }, - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation 
== false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "CIS-4.6.3 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
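The allow-privilege-escalation rule above distinguishes two failure modes: when the field is simply absent it emits only a fixPath suggestion, and when it is explicitly true it reports the offending path instead. A condensed sketch of that split (names illustrative, PodSecurityPolicy handling omitted):

```rego
package example

# Field not set at all: nothing to point at, suggest adding it as false.
escalation_result(container, path) = [failed_path, fix_path] {
	not container.securityContext.allowPrivilegeEscalation == false
	not container.securityContext.allowPrivilegeEscalation == true
	failed_path := ""
	fix_path := {"path": path, "value": "false"}
}

# Field explicitly true: report the existing path, no fixPath needed.
escalation_result(container, path) = [failed_path, fix_path] {
	container.securityContext.allowPrivilegeEscalation == true
	failed_path := path
	fix_path := ""
}
```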
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "rolebinding-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := 
get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "role-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "configmap-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": 
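Every *-in-default-namespace rule under C-0212 shares the same body and differs only in its match section: the helper reports metadata.namespace as the failed path when it equals "default", and suggests a fixPath when no namespace is set at all. The shared helper, condensed here for reference (YOUR_NAMESPACE is the placeholder these rules already use):

```rego
package example

# Namespace explicitly "default": report the existing path, nothing to add.
is_default_namespace(metadata) = [failed_path, fix_path] {
	metadata.namespace == "default"
	failed_path := "metadata.namespace"
	fix_path := ""
}

# Namespace missing entirely: suggest setting one explicitly.
is_default_namespace(metadata) = [failed_path, fix_path] {
	not metadata.namespace
	failed_path := ""
	fix_path := {"path": "metadata.namespace", "value": "YOUR_NAMESPACE"}
}
```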
\"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "endpoints-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Endpoints" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "persistentvolumeclaim-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PersistentVolumeClaim" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "podtemplate-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodTemplate" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "replicationcontroller-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ReplicationController" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "service-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) 
= [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "serviceaccount-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "endpointslice-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "horizontalpodautoscaler-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - "v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "lease-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "csistoragecapacity-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": 
\"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ingress-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "poddisruptionbudget-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-secret-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Secret" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "controlID": "C-0213", - "name": "CIS-4.2.1 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" - ], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. 
The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "psp-deny-privileged-container", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0214", - "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostpid", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0215", - "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp 
-o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostipc", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0216", - "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostnetwork", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - 
"description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0217", - "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-allowprivilegeescalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == 
\"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0218", - "name": "CIS-4.2.6 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-root-container", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == 
\"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" - } - ] - }, - { - "controlID": "C-0219", - "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined. If a PSP is created 'allowedCapabilities' is set by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-allowed-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0220", - "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods with containers require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-required-drop-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return al the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n" - } - ] - }, - { - "controlID": "C-0221", - "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", - "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. 
Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.2. Open the Amazon ECR console at.\n3. From the navigation bar, choose the Region to create your repository in.\n4. In the navigation pane, choose Repositories.\n5. On the Repositories page, choose the repository that contains the image to scan.\n6. On the Images page, select the image to scan and then choose Scan.", - "manual_test": "Please follow AWS ECS or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "If you are utilizing AWS ECR The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageErrorYou may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returnedYou may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. 
To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", - "default_value": "Images are not scanned by Default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure-image-scanning-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "DescribeRepositories" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}" - } - ] - }, - { - "controlID": "C-0222", - "name": "CIS-5.1.2 Minimize user access to Amazon ECR", - "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. 
Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-aws-policies-are-present", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "fails if aws policies are not found", - "remediation": "Implement policies to minimize user access to Amazon ECR", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has not policies to minimize access to Amazon ECR; Add some policy in order to minimize access on it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0223", - "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", - "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS. 
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", - "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", - "attributes": { - "useFromKubescapeVersion": "v2.2.5" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\t# node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy 
Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := [\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n" - } - ] - }, - { - "controlID": "C-0225", - "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see list text hereEnabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", - "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-default-service-accounts-has-only-default-roles", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"deletePaths\": [sprintf(\"subjects[%d]\", [i])],\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "automount-default-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - } - ] - }, - { - "controlID": "C-0226", - "name": "CIS-3.3.1 Prefer using a container-optimized OS when possible", - "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", - "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like locked-down firewall is configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", - "remediation": "", - "manual_test": "If a container-optimized OS is required examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", - "references": [ - "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", - "https://aws.amazon.com/bottlerocket/" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. ssh) may not be possible, with access and debugging being intended via a management tool.", - "default_value": "A container-optimized OS is not the default.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "alert-container-optimized-os-not-in-use", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". \n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. 
\n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}" - } - ] - }, - { - "controlID": "C-0227", - "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. 
If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", - "default_value": "By default, Endpoint Public Access is disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-endpointprivateaccess-is-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "controlID": "C-0228", - "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. 
The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "Check for private endpoint access to the Kubernetes API server", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", - "default_value": "By default, the Public Endpoint is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n" - } - ] - }, - { - "controlID": "C-0229", - "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", - "manual_test": "", - "references": [], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess in enabled on a private node for EKS. A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "controlID": "C-0230", - "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", - "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", - "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", - "remediation": "", - "manual_test": "", - "references": [], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Network Policy requires the Network Policy add-on. 
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-network-policy-is-enabled-eks", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0231", - "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "ruleDependencies": [], - "relevantCloudProviders": [ - "EKS" - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := \"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := \"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := 
\tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n" - } - ] - }, - { - "controlID": "C-0232", - "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", - "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", - "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", - "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", - "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not they will not be able to access the namespace or deploy.", - "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "review-roles-with-aws-iam-authenticator", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0233", - "name": "CIS-5.6.1 Consider Fargate for running untrusted workloads", - "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", - "long_description": "", - "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. 
On the Configure pods selection page, enter the following information and choose Next.\n\n * For Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "", - "default_value": "By default, AWS Fargate is not utilized.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "alert-fargate-not-in-use", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in the cluster.\n# a Node is identified as using fargate if its name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if the count of all nodes equals the count of nodes_not_fargate, it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}" - } - ] - }, - { - "controlID": "C-0234", - "name": "CIS-4.4.2 Consider external secret storage", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "manual_test": "Review your secrets management implementation.", - "references": [], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-external-secrets-storage-is-in-use", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "relevantCloudProviders": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that don't support an external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resource\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n" - } - ] - }, - { - "controlID": "C-0235", - "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 
or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0238", - "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. 
configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0242", - "name": "CIS-5.6.2 Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-hostile-multitenant-workloads", - "attributes": { - "actionRequired": "manual review" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "ruleDependencies": [], - "configInputs": [], - "controlConfigInputs": [], - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", - "remediation": "Use physically isolated clusters", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" - } - ] - }, - { - "controlID": "C-0246", - "name": "CIS-4.1.7 Avoid use of system:masters group", - "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", - "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. 
An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", - "remediation": "Remove the `system:masters` group from all users in the cluster.", - "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", - "references": [ - "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", - "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rule-manual", - "attributes": { - "actionRequired": "manual review", - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", - "remediation": "", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" - } - ] - } - ], - "ControlsIDs": [ - "C-0066", - "C-0067", - "C-0078", - "C-0167", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0183", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0205", - "C-0206", - "C-0207", - "C-0209", - "C-0211", - "C-0212", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0220", - "C-0221", - "C-0222", - "C-0223", - "C-0225", - "C-0226", - "C-0227", - "C-0228", - "C-0229", - "C-0230", - "C-0231", - "C-0232", - "C-0233", - "C-0234", - "C-0235", - "C-0238", - "C-0242", - "C-0246" - ] -} \ No newline at end of file diff --git a/releaseDev/cis-v1.23-t1.0.1.json b/releaseDev/cis-v1.23-t1.0.1.json deleted file mode 100644 index 8d7738c6b..000000000 --- a/releaseDev/cis-v1.23-t1.0.1.json +++ /dev/null @@ -1,8583 +0,0 @@ -{ - "name": "cis-v1.23-t1.0.1", - "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", - "attributes": { - "armoBuiltin": true, - "version": "v1.0.1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "1": { - "id": "1", - "name": "Control Plane Components", - "subSections": { - "1": { - "id": "1.1", - "name": "Control Plane Node Configuration Files", - "controlsIDs": [ - "C-0092", - "C-0093", - "C-0094", - "C-0095", - "C-0096", - "C-0097", - "C-0098", - "C-0099", - "C-0100", - 
"C-0101", - "C-0102", - "C-0103", - "C-0104", - "C-0105", - "C-0106", - "C-0107", - "C-0108", - "C-0109", - "C-0110", - "C-0111", - "C-0112" - ] - }, - "2": { - "id": "1.2", - "name": "API Server", - "controlsIDs": [ - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - "C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143" - ] - }, - "3": { - "id": "1.3", - "name": "Controller Manager", - "controlsIDs": [ - "C-0144", - "C-0145", - "C-0146", - "C-0147", - "C-0148", - "C-0149", - "C-0150" - ] - }, - "4": { - "id": "1.4", - "name": "Scheduler", - "controlsIDs": [ - "C-0151", - "C-0152" - ] - } - } - }, - "2": { - "name": "etcd", - "id": "2", - "controlsIDs": [ - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159" - ] - }, - "3": { - "name": "Control Plane Configuration", - "id": "3", - "subSections": { - "2": { - "name": "Logging", - "id": "3.2", - "controlsIDs": [ - "C-0160", - "C-0161" - ] - } - } - }, - "4": { - "name": "Worker Nodes", - "id": "4", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "4.1", - "controlsIDs": [ - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171" - ] - }, - "2": { - "name": "Kubelet", - "id": "4.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184" - ] - } - } - }, - "5": { - "name": "Policies", - "id": "5", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "5.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191" - ] - }, - "2": { - "name": "Pod Security Standards", - "id": "5.2", - "controlsIDs": [ - "C-0192", - "C-0193", - "C-0194", - "C-0195", - "C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204" - ] - }, - "3": { - "name": "Network Policies and CNI", - "id": "5.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "name": "Secrets Management", - "id": "5.4", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "7": { - "name": "General Policies", - "id": "5.7", - "controlsIDs": [ - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "controlID": "C-0092", - "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0093", - "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0094", - "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0095", - "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0096", - "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. 
You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0097", - "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0098", - "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` 
controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API objects. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0099", - "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API objects. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0100", - "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0101", - "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0102", - "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory has permissions of `755`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0103", - "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. 
It should be owned by `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"etcd\"\n\tallowed_group := \"etcd\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0104", - "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains the private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, admin.conf has permissions of `600`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0105", - "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0106", - "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0107", - "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0108", - "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0109", - "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0110", - "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": 
alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0111", - "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0112", - "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0113", - "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the API server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable anonymous requests to the API server.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# 
Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0114", - "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", - "description": "Do not use token based authentication.", - "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", - "default_value": "By default, `--token-auth-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not use token based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication could not be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server TLS is not configured\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0115", - "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", - "default_value": "By default, `DenyServiceExternalIPs` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `DenyServiceExternalIPs` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0116", - "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", - "description": "Enable certificate based kubelet authentication.", - "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, certificate-based kubelet authentication is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable certificate based kubelet authentication.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0117", - "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", - "description": "Verify kubelet's certificate before establishing connection.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verify kubelet's certificate before establishing connection.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0118", - "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not always authorize all requests.", - "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Only authorized requests will be served.", - "default_value": "By default, `AlwaysAllow` is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not always authorize all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0119", - "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, `Node` authorization is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = 
result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0120", - "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", - "description": "Turn on Role Based Access Control.", - "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. It is recommended to use the RBAC authorization mode.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", - "default_value": "By default, `RBAC` authorization is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Turn on Role Based Access Control.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and 
ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0121", - "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", - "description": "Limit the rate at which the API server accepts requests.", - "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. 
Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "You need to carefully tune in limits as per your environment.", - "default_value": "By default, `EventRateLimit` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Limit the rate at which the API server accepts requests.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0122", - "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", - "description": "Do not allow all requests.", - "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and do not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. 
Its behavior was equivalent to turning off all admission controllers.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Only requests explicitly allowed by the admissions control plugins would be served.", - "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not allow all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0123", - "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", - "description": "Always pull images.", - "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. 
When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" - ], - "attributes": {}, - "baseScore": 4, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", - "default_value": "By default, `AlwaysPullImages` is not set.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Always pull images.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0124", - "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", - "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies.", - "default_value": "By default, `SecurityContextDeny` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies.\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny admission controller is not enabled. 
This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0125", - "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", - "description": "Automate service accounts management.", - "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", -        "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", -        "references": [ -            "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" -        ], -        "attributes": {}, -        "baseScore": 3, -        "impact_statement": "None.", -        "default_value": "By default, `ServiceAccount` is set.", -        "category": { -            "name": "Control plane", -            "id": "Cat-1" -        }, -        "scanningScope": { -            "matches": [ -                "cluster" -            ] -        }, -        "rules": [ -            { -                "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", -                "attributes": {}, -                "ruleLanguage": "Rego", -                "match": [ -                    { -                        "apiGroups": [ -                            "" -                        ], -                        "apiVersions": [ -                            "v1" -                        ], -                        "resources": [ -                            "Pod" -                        ] -                    } -                ], -                "dynamicMatch": [], -                "ruleDependencies": [], -                "description": "Automate service accounts management.", -                "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", -                "ruleQuery": "", -                "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0126", - "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", - "description": "Reject creating objects in a namespace that is undergoing termination.", - "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "None", - "default_value": "By default, `NamespaceLifecycle` is set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Reject creating objects in a namespace that is undergoing termination.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0127", - "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `NodeRestriction` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, 
\"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0128", - "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", - "description": "Do not disable the secure port.", - "long_description": "The secure port is used to serve https with authentication and authorization. If you disable it, no https traffic is served and all traffic is served unencrypted.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "You need to set the API Server up with the right TLS certificates.", - "default_value": "By default, port 6443 is used as the secure port.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not disable the secure port.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
\"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0129", - "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. 
This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0130", - "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0131", - "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", - "description": "Retain the logs for at least 30 days or as appropriate.", - 
"long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Retain the logs for at least 30 days or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, \"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": 
sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0132", - "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", - "description": "Retain 10 or an appropriate number of old log files.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. For example, if you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Retain 10 or an appropriate number of old log files.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = 
result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0133", - "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", - "attributes": { - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0134", - "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", - "description": "Set global request timeout for API server requests as appropriate.", - "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--request-timeout` is set to 60 seconds.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Set global request timeout for API server requests as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0135", - "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", - "description": "Validate service account before validating token.", - "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. 
This is an example of time of check to time of use security issue.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `--service-account-lookup` argument is set to `true`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Validate service account before validating token.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) 
{\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0136", - "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-key-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", - "attributes": { - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0137", - "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
\"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0138", - "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0139", - "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0140", - "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client 
connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-cafile` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0141", - "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", - "description": "Encrypt etcd key-value store.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. 
Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `--encryption-provider-config` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Encrypt etcd key-value store.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", - 
"resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0142", - "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no encryption provider is set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n" - } - ] - }, - { - "controlID": "C-0143", - "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. 
By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_api_server(obj)\n\twanted = [\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0144", - "name": "CIS-1.3.1 Ensure that the Controller Manager 
--terminated-pod-gc-threshold argument is set as appropriate", - "description": "Activate garbage collector on pod termination, as appropriate.", - "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Activate garbage collector on pod termination, as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = {\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, 
\"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0145", - "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == 
\"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0146", - "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", - "description": "Use individual service account credentials for each controller.", - "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" - ], - "attributes": {}, - "baseScore": 4, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. 
If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", - "default_value": "By default, `--use-service-account-credentials` is set to false.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Use individual service account credentials for each controller.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := 
{\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0147", - "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-private-key-file` it not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` it not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == 
\"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0148", - "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could be a subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "You need to setup and maintain root certificate authority file.", - "default_value": "By default, `--root-ca-file` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy 
default, `--root-ca-file` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0149", - "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation on controller-manager.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addresses availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable kubelet server certificate rotation on controller-manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0150", - "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": 
\"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0151", - "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" - } - ] - }, - { - "controlID": "C-0152", - "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": 
result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" - } - ] - }, - { - "controlID": "C-0153", - "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", - "description": "Configure TLS encryption for the etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Client connections only over TLS would be served.", - "default_value": "By default, TLS encryption is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "etcd-tls-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Configure TLS encryption for the etcd service.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default Value\nBy default, TLS encryption is not set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0154", - "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", - "description": "Enable client authentication on etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", - "default_value": "By default, the etcd service can be queried by unauthenticated clients.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "etcd-client-auth-cert", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Enable client authentication on etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - 
"controlID": "C-0155", - "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", - "description": "Do not use self-signed certificates for TLS.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", - "default_value": "By default, `--auto-tls` is set to `false`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "etcd-auto-tls-disabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Do not use self-signed certificates for TLS.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. 
Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0156", - "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. 
By default, peer communication over TLS is not configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "etcd-peer-tls-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0157", - "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", - "description": "etcd should be configured for peer authentication.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", - "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. 
**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "etcd-peer-client-auth-cert", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "etcd should be configured for peer authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], 
\"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0158", - "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "etcd-peer-auto-tls-disabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. 
Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0159", - "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", - "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", - "default_value": "By default, no etcd certificate is created and used.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "etcd-unique-ca", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) 
{\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := split(command, \"=\")\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0160", - "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", - "remediation": "Create an audit policy file for your cluster.", - "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` flag is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating too large volumes of log information as this could impact the availability of the cluster nodes.", - "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-native-cis", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0161", - "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", - "description": "Ensure that the audit policy created for the cluster covers key security concerns.", - "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", - "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", - "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas :-\n\n * Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", - "default_value": "By default Kubernetes clusters do not log audit information.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "audit-policy-content", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n# rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit 
levels\n\tvalid_rules := [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}" - } - ] - }, - { - "controlID": "C-0162", - "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kubelet` service file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0163", - "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0164", - "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, proxy file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0165", - "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `proxy` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0166", - "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`. Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file has permissions of `600`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0167", - "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0168", - "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0169", - "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. 
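As a practical aside covering this control together with the previous one: when the CA bundle is passed on the kubelet command line, its path can be extracted from the running process and both attributes checked at once. A sketch, assuming the `--client-ca-file=<path>` form; a path set through the config file needs to be read from there instead:

```bash
# Hypothetical audit snippet for 4.1.7 (permissions) and 4.1.8 (ownership).
ca_file=$(ps -ef | grep '[k]ubelet' | tr ' ' '\n' \
  | awk -F= '/^--client-ca-file=/{print $2}' | head -n1)

if [ -n "$ca_file" ]; then
  stat -c 'permissions=%a owner=%U:%G' "$ca_file"   # expect 600 or tighter, and root:root
else
  echo "--client-ca-file not on the command line; check authentication.x509.clientCAFile in the config file"
fi
```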
The file should be owned by `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0170", - "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
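As an aside on what "600 or more restrictive" means (the rules above encode it as `allowed_perms := 384`, i.e. `0o600`): the observed mode must not set any permission bit outside owner read/write. A minimal shell illustration, with the path used purely as an example:

```bash
# Illustrative only: emulate the "600 or more restrictive" comparison in shell.
file=/var/lib/kubelet/config.yaml   # example path; substitute the file under audit
mode=$(stat -c '%a' "$file")        # e.g. 600, 640, 777
allowed=600

# Interpret both values as octal; the file passes only if it sets no
# permission bit outside the allowed owner read/write mask.
if (( (8#$mode & ~8#$allowed) == 0 )); then
  echo "$file mode $mode is 600 or more restrictive"
else
  echo "$file mode $mode is too permissive"
fi
```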
If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0171", - "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be owned by root:root.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /var/lib/kubelet/config.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0172", - "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
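A quick way to see which of the two configuration mechanisms described in the remediation below is actually in effect on a node is sketched here; it assumes the kubeadm default config path `/var/lib/kubelet/config.yaml` and that the command-line flag, when present, overrides the file:

```bash
# Show how anonymous authentication is configured on this node: the command-line
# flag (if present) takes precedence, otherwise the config file applies.
ps -ef | grep '[k]ubelet' | grep -o -- '--anonymous-auth=[^ ]*'
grep -A 2 'anonymous:' /var/lib/kubelet/config.yaml   # expect "enabled: false"
```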
You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot 
yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0173", - "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). 
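One concrete way to perform that `/configz` review without talking to port 10250 directly is to go through the API server proxy; `NODE_NAME` is a placeholder and the caller needs RBAC access to the `nodes/proxy` resource:

```bash
# Dump the kubelet's effective configuration (including authorization.mode)
# for a single node through the API server proxy.
kubectl get --raw "/api/v1/nodes/NODE_NAME/proxy/configz" | python3 -m json.tool
```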
Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Do not allow all requests. Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga 
:= {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0174", - "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := 
obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0175", - "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
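To make the exposure concrete: while the legacy read-only port is enabled it serves endpoints such as `/pods` with no authentication at all. A one-line probe, with `NODE_IP` as a placeholder and the historical default port assumed:

```bash
# Succeeds only if the kubelet read-only port is still serving unauthenticated
# requests; a hardened node should refuse the connection.
curl -sf "http://NODE_IP:10255/pods" > /dev/null \
  && echo "read-only port is open and unauthenticated" \
  || echo "read-only port appears to be disabled"
```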
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "read-only-port-enabled-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0176", - "name": 
"CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, 
\"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0177", - "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "By default, `--protect-kernel-defaults` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-protect-kernel-defaults", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the --protect-kernel-defaults argument is set to true.", - "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to 
true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0178", - "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. 
If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-ip-tables", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0179", - "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding 
hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", - "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", - "default_value": "By default, `--hostname-override` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-hostname-override", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0180", - "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. 
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "By default, `--event-qps` argument is set to `5`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-event-qps", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set the --event-qps argument to an appropriate level, or if using a config file set the eventRecordQPS property to a value other than 0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := 
obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0181", - "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the Kubelets.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "validate-kubelet-tls-configuration-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", - "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t# get yaml config equivalent\n\tnot_set_prop := 
res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0182", - "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also require the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", - "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet client certificate rotation is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-certificates", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --rotate-certificates argument is not set to false.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"rotateCertificates\"],\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to 
analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0183", - "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet server certificate rotation is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-kubelet-server-certificate", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n" - } - ] - }, - { - "controlID": "C-0184", - "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "kubelet-strong-cryptographics-ciphers", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", - "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [\"TLSCipherSuites\"],\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "cluster-admin-role", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", 
[j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-5.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package 
armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespace in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-5.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access).\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-create-pod", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"", 
\"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": 
"automount-default-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - }, - { - "name": "namespace-without-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, 
namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := 
[service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check 
if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. 
The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-bind-escalate", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can bind or escalate roles/clusterroles", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# 
================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := 
[sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "controlID": "C-0192", - "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", - "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", - "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", - "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", - "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.", - "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "pod-security-admission-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced 
resources (validating/mutating webhooks)", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "controlID": "C-0193", - "name": "CIS-5.2.2 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of privileged containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0194", - "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", 
- "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not 
enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0195", - "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run 
with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be definited in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0196", - "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to 
true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namesapces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not 
enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0197", - "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the 
`allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-restricted-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0198", - "name": "CIS-5.2.7 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-restricted-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0199", - "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", - "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. 
With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod 
security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0200", - "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default 
set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, there are no restrictions on adding capabilities to containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-restricted-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0201", - "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-restricted-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n 
admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0202", - "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", - "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", - "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. 
As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0203", - "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", - "description": "Do not generally admit containers which make use of `hostPath` volumes.", - "long_description": "A container which mounts a 
`hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0204", - "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", - "description": "Do not 
generally permit containers which require the use of HostPorts.", - "long_description": "Host ports connect containers directly to the host's network. This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the use of HostPorts.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", 
[admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", 
\"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0205", - "name": "CIS-5.3.1 Ensure that the CNI in use supports Network 
Policies", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "This will depend on the CNI plugin in use.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [], - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" - } - ] - }, - { - "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. 
A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", - "default_value": "By default, network policies are not created.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", - 
"controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-secrets-in-env-var", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "CIS-5.4.2 Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" - ], - "attributes": {}, - "baseScore": 5, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "external-secret-storage", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" - } - ] - }, - { - "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "default_value": "By default, Kubernetes starts with two initial namespaces: 1. 
`default` - The default namespace for objects with no other namespace2. `kube-system` - The namespace for objects created by the Kubernetes system3. `kube-node-lease` - Namespace used for node heartbeats4. `kube-public` - Namespace used for public information in a cluster", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-namespaces", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - } - ], - "ruleDependencies": [], - "description": "lists all namespaces for users to review", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", - "controlID": "C-0210", - "description": "Enable `docker/default` seccomp profile in your pod definitions.", - "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", - "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "manual_test": "Review the pod definitions in your cluster. 
It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", - "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "set-seccomp-profile-RuntimeDefault", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile as RuntimeDefault", - "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container level.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": 
seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != \"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n" - } - ] - }, - { - "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
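As an illustrative sketch only (assumed values, not taken from the framework; the name, image, numeric IDs, SELinux label and sysctl below are placeholders), a Pod that sets the pod-level and container-level securityContext fields checked by the rules in this control might look like the following:

```
apiVersion: v1
kind: Pod
metadata:
  name: secured-app                    # placeholder name
spec:
  securityContext:                     # pod-level settings
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 2000
    fsGroupChangePolicy: OnRootMismatch
    supplementalGroups: [3000]
    sysctls:
      - name: net.ipv4.tcp_syncookies  # assumed example of a safe sysctl
        value: "1"
    seccompProfile:
      type: RuntimeDefault
    seLinuxOptions:
      level: "s0:c123,c456"            # placeholder MCS label
  containers:
    - name: app
      image: nginx                     # placeholder image
      securityContext:                 # container-level settings
        privileged: false
        allowPrivilegeEscalation: false
        readOnlyRootFilesystem: true
        procMount: Default             # only meaningful when the ProcMountType feature gate is enabled
        capabilities:
          drop: ["NET_RAW"]
```

Each rule in this control inspects one of these fields, and the fix paths it emits (for example `spec.containers[0].securityContext.readOnlyRootFilesystem`) point at exactly these locations.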
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", 
[wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": 
sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - }, - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := 
evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": 
failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - 
], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-seccomp-profile", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", 
[concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-procmount-default", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := 
input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" - }, - { - "name": "set-fsgroup-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has 
fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "set-sysctls-params", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.sysctls is not set.", - "remediation": "Set securityContext.sysctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": 
\"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" - }, - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - 
"resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} 
\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "CIS-5.7.4 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
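As a hedged illustration (the resource names are placeholders, not part of the control), the rules later in this control flag objects whose `metadata.namespace` is either missing or `default`; creating a dedicated namespace and referencing it explicitly addresses both cases:

```
apiVersion: v1
kind: Namespace
metadata:
  name: team-a                # placeholder namespace name
---
apiVersion: v1
kind: Pod
metadata:
  name: demo                  # placeholder name
  namespace: team-a           # explicit namespace, so the object does not land in "default"
spec:
  containers:
    - name: app
      image: nginx            # placeholder image
```

The same `metadata.namespace` fix path applies to the RoleBinding, Role, ConfigMap, Endpoints, PersistentVolumeClaim and PodTemplate rules that follow.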
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "rolebinding-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "role-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "configmap-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot 
metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "endpoints-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Endpoints" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "persistentvolumeclaim-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PersistentVolumeClaim" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "podtemplate-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodTemplate" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "replicationcontroller-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ReplicationController" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "service-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot 
metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "serviceaccount-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "endpointslice-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "horizontalpodautoscaler-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - "v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "lease-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "csistoragecapacity-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, 
fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ingress-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "poddisruptionbudget-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-secret-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Secret" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0092", - "C-0093", - "C-0094", - "C-0095", - "C-0096", - "C-0097", - "C-0098", - "C-0099", - "C-0100", - "C-0101", - "C-0102", - "C-0103", - "C-0104", - "C-0105", - "C-0106", - "C-0107", - "C-0108", - "C-0109", - "C-0110", - "C-0111", - "C-0112", - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - "C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143", - "C-0144", - "C-0145", - "C-0146", - "C-0147", - "C-0148", - "C-0149", - "C-0150", - "C-0151", - "C-0152", - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159", - "C-0160", - "C-0161", - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0192", - "C-0193", - "C-0194", - "C-0195", - "C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204", - "C-0205", - "C-0206", - "C-0207", - "C-0208", - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] -} \ No newline at end of file diff --git a/releaseDev/clusterscan.json b/releaseDev/clusterscan.json deleted file mode 100644 index d9ffbb2e0..000000000 --- a/releaseDev/clusterscan.json +++ /dev/null @@ -1,1812 +0,0 @@ -{ - "name": "ClusterScan", - "description": "Framework for scanning a cluster", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "security" - ], - "version": null, - "controls": [ - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
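Each of the *-in-default-namespace rules above follows the same is_default_namespace / get_failed_path / get_fixed_path pattern, so it can be exercised with an ordinary opa test. A hypothetical unit-test sketch for the Pod variant, assuming the rule file is loaded under its declared package armo_builtins and using an illustrative Pod object:

package example_default_namespace_test

import future.keywords.in

# A Pod without metadata.namespace should produce exactly one alert whose
# fixPaths suggests setting the namespace, and no failed paths.
test_pod_without_namespace_gets_fixpath {
    results := data.armo_builtins.deny with input as [{
        "kind": "Pod",
        "apiVersion": "v1",
        "metadata": {"name": "demo"},
        "spec": {"containers": [{"name": "c", "image": "nginx"}]}
    }]
    count(results) == 1
    some msg in results
    msg.fixPaths == [{"path": "metadata.namespace", "value": "YOUR_NAMESPACE"}]
    msg.failedPaths == []
}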
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "RBAC enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rbac-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package 
armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" - }, - { - "name": "rbac-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
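The rbac-enabled-native rule above reduces to a small string check on the API server command line: locate the --authorization-mode flag, split on "=", and fail when the value list does not mention RBAC. A standalone sketch of that check (package name and sample commands are illustrative):

package example_rbac_flag

# True when the --authorization-mode flag is present but its value omits RBAC.
rbac_missing(cmd) {
    some j
    flag := cmd[j]
    contains(flag, "--authorization-mode=")
    parts := split(flag, "=")
    not contains(parts[1], "RBAC")
}

# Examples:
#   rbac_missing(["kube-apiserver", "--authorization-mode=Node,RBAC"])   is undefined (control passes)
#   rbac_missing(["kube-apiserver", "--authorization-mode=AlwaysAllow"]) is true      (control fails)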
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
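For EKS, the k8s-audit-logs-enabled-cloud rule above requires every control-plane log type to be covered by an enabled ClusterLogging entry, using the future.keywords every construct. A self-contained sketch of that aggregation (package name and sample data are illustrative):

package example_eks_audit_logs

import future.keywords.every
import future.keywords.in

# The five control-plane log types EKS can ship.
logging_types := {"api", "audit", "authenticator", "controllerManager", "scheduler"}

# Holds only if each log type appears in at least one enabled logSetup entry.
all_auditlogs_enabled(log_setups) {
    every type in logging_types {
        some setup in log_setups
        setup.Enabled == true
        type in setup.Types
    }
}

# Example: only api and audit are enabled here, so all_auditlogs_enabled is
# undefined for this value and the control would fail.
sample_log_setups := [{"Enabled": true, "Types": ["api", "audit"]}]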
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "insecure-port-flag", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. 
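The insecure-port-flag rule above pinpoints the offending flag by index, so the reported path names both the container and the command argument. A minimal standalone sketch of that path construction (package name and example input are illustrative):

package example_insecure_port

# Flags every container command entry that still enables the insecure port,
# reporting the exact index path the way insecure-port-flag does.
deny[path] {
    container := input.spec.containers[i]
    command := container.command[j]
    contains(command, "--insecure-port=1")
    path := sprintf("spec.containers[%v].command[%v]", [format_int(i, 10), format_int(j, 10)])
}

# Example input:
#   {"spec": {"containers": [{"name": "kube-apiserver",
#                             "command": ["kube-apiserver", "--insecure-port=1"]}]}}
# deny then evaluates to {"spec.containers[0].command[1]"}.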
Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "anonymous-access-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", - "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n" - } - ] - }, - { - "controlID": "C-0265", - "name": "Authenticated user has sensitive permissions", - "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
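Because the anonymous-access-enabled rule above reports a deletePaths entry rather than a fixPaths entry, a unit test can assert the exact subject index that should be removed. A hypothetical opa test sketch, assuming the rule is loaded under its declared package armo_builtins and using an illustrative ClusterRoleBinding:

package example_anonymous_access_test

import future.keywords.in

# A ClusterRoleBinding whose only subject is the system:unauthenticated group
# should be flagged, with deletePaths pointing at that subject.
test_unauthenticated_group_binding_is_flagged {
    results := data.armo_builtins.deny with input as [{
        "kind": "ClusterRoleBinding",
        "apiVersion": "rbac.authorization.k8s.io/v1",
        "metadata": {"name": "anonymous-admin"},
        "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "cluster-admin"},
        "subjects": [{"kind": "Group", "name": "system:unauthenticated", "apiGroup": "rbac.authorization.k8s.io"}]
    }]
    some msg in results
    msg.deletePaths == ["subjects[0]"]
}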
This control ensures that system:authenticated users do not have cluster risking permissions.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "system-authenticated-allowed-to-take-over-cluster", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", - "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is gourp\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}" - } - ] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers who have permissions to access secrets can 
access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := 
array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Roles with delete capabilities", - "attributes": { - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-excessive-delete-rights-v1", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", 
\"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. 
Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-portforward-v1", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Validate admission controller (validating)", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. 
Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-validating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Validate admission controller" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns validating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Validate admission controller (mutating)", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-mutating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Validate admission controller" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns mutating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. 
This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-create-pod", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := 
[sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "Missing network policy", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure_network_policy_configured_in_labels", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "description": "fails if no networkpolicy configured in workload labels", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", 
[workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Exposure to internet", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "service-destruction", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-database-without-authentication", - "categories": [ - "Initial Access" - ] - } - ] - }, - "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. 
It fails in case it find workloads connected with these resources.", - "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", - "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", - "controlID": "C-0256", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "exposure-to-internet", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n\t\t \"reviewPaths\": failPath,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n\n # Make sure that they belong to the same namespace\n svc.metadata.namespace == ingress.metadata.namespace\n\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t\t{\n\t \"object\": ingress,\n\t\t \"reviewPaths\": result,\n\t \"failedPaths\": result,\n\t },\n\t\t{\n\t \"object\": svc,\n\t\t}\n ]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == 
wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": 
[path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := 
input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 
10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0066", - "C-0088", - "C-0067", - "C-0005", - "C-0262", - "C-0265", - "C-0015", - "C-0002", - "C-0007", - "C-0063", - "C-0036", - "C-0039", - "C-0035", - "C-0188", - "C-0187", - "C-0012", - "C-0260", - "C-0256", - "C-0038", - "C-0041", - "C-0048", - "C-0057", - "C-0013" - ] -} \ No newline at end of file diff --git a/releaseDev/controls.json b/releaseDev/controls.json deleted file mode 100644 index 7002ee55d..000000000 --- a/releaseDev/controls.json +++ /dev/null @@ -1,7132 +0,0 @@ -[ - { - "controlID": "C-0105", - "name": "Ensure that the admin.conf file ownership is set to root:root", - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" - ], - "rulesNames": [ - "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0108", - "name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" - ], - "rulesNames": [ - "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in a Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" - ], - "attributes": {}, - "rulesNames": [ - "list-all-namespaces" - ], - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "default_value": "By default, Kubernetes starts with four initial namespaces: 1. `default` - The default namespace for objects with no other namespace 2. `kube-system` - The namespace for objects created by the Kubernetes system 3. `kube-node-lease` - Namespace used for node heartbeats 4. `kube-public` - Namespace used for public information in a cluster", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0106", - "name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" - ], - "rulesNames": [ - "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workloads with RCE vulnerabilities exposed to external traffic", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to external traffic. This control lists all images with such vulnerabilities if their pod has either LoadBalancer or NodePort service.", - "remediation": "Either update the container image to fix the vulnerabilities (if such a fix is available) or reassess whether this workload must be exposed to outside traffic. If no fix is available, consider periodic restart of the pod to minimize the risk of persistent intrusion. Use the exception mechanism if you don't want to see this report again.", - "rulesNames": [ - "exposed-rce-pods" - ], - "long_description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to external traffic. This control lists all images with such vulnerabilities if their pod has either LoadBalancer or NodePort service.", - "test": "This control enumerates external-facing workloads that have a LoadBalancer or NodePort service and checks the image vulnerability information for the RCE vulnerability.", - "controlID": "C-0084", - "baseScore": 8.0, - "example": "@controls/examples/c84.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "rulesNames": [ - "rule-credentials-in-env-var", - "rule-credentials-configmap" - ], - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. 
Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" - ], - "attributes": {}, - "rulesNames": [ - "rule-secrets-in-env-var" - ], - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "rulesNames": [ - "resources-cpu-limits" - ], - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0124", - "name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", - "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used" - ], - "baseScore": 4, - "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies", - "default_value": "By default, `SecurityContextDeny` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0216", - "name": "Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "rulesNames": [ - "psp-deny-hostnetwork" - ], - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0129", - "name": "Ensure that the API Server --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. 
It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-profiling-argument-is-set-to-false" - ], - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0111", - "name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" - ], - "rulesNames": [ - "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workload with cluster takeover roles", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Cluster Access" - ], - "displayRelatedResources": true, - "clickableResourceKind": "ServiceAccount" - } - ] - }, - "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", - "remediation": "You should apply least privilege principle. 
Make sure each service account has only the permissions that are absolutely necessary.", - "rulesNames": [ - "workload-with-cluster-takeover-roles" - ], - "long_description": "In Kubernetes, workloads with overly permissive roles pose a significant security risk. When a workload is granted roles that exceed the necessities of its operation, it creates an attack surface for privilege escalation within the cluster. This is especially critical if the roles include permissions for creating, updating, or accessing sensitive resources or secrets. An attacker exploiting such a workload can leverage these excessive privileges to perform unauthorized actions, potentially leading to a full cluster takeover. Ensuring that each service account associated with a workload is limited to permissions that are strictly necessary for its function is crucial in mitigating the risk of cluster takeovers.", - "test": "Check if the service account used by a workload has cluster takeover roles.", - "controlID": "C-0267", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0160", - "name": "Ensure that a minimal audit policy is created", - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", - "remediation": "Create an audit policy file for your cluster.", - "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" - ], - "attributes": {}, - "rulesNames": [ - "k8s-audit-logs-enabled-native-cis" - ], - "baseScore": 5, - "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating too large volumes of log information as this could impact the available of the cluster nodes.", - "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0199", - "name": "Minimize the admission of containers with the NET_RAW capability", - "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. 
With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-baseline-applied-1", - "pod-security-admission-baseline-applied-2" - ], - "baseScore": 6, - "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0226", - "name": "Prefer using a container-optimized OS when possible", - "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", - "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like locked-down firewall is configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", - "remediation": "", - "manual_test": "If a container-optimized OS is required examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", - "references": [ - "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", - "https://aws.amazon.com/bottlerocket/" - ], - "attributes": {}, - "rulesNames": [ - "alert-container-optimized-os-not-in-use" - ], - "baseScore": 3, - "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. 
ssh) may not be possible, with access and debugging being intended via a management tool.", - "default_value": "A container-optimized OS is not the default.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0145", - "name": "Ensure that the Controller Manager --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-controller-manager-profiling-argument-is-set-to-false" - ], - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0167", - "name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %U %G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" - ], - "rulesNames": [ - "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0151", - "name": "Ensure that the Scheduler --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-scheduler-profiling-argument-is-set-to-false" - ], - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "rulesNames": [ - "rule-privilege-escalation" - ], - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. 
Use the exception mechanism to prevent repetitive notifications.", - "rulesNames": [ - "rule-can-list-get-secrets-v1" - ], - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users who have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0159", - "name": "Ensure that a unique Certificate Authority is used for etcd", - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", - "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" - ], - "attributes": {}, - "rulesNames": [ - "etcd-unique-ca" - ], - "baseScore": 8, - "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", - "default_value": "By default, no etcd certificate is created and used.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0134", - "name": "Ensure that the API Server --request-timeout argument is set as appropriate", - "description": "Set global request timeout for API server requests as appropriate.", - "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. 
By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--request-timeout` is set to 60 seconds.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0112", - "name": "Ensure that the Kubernetes PKI key file permissions are set to 600", - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" - ], - "rulesNames": [ - "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0152", - "name": "Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. 
As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1" - ], - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0122", - "name": "Ensure that the admission control plugin AlwaysAdmit is not set", - "description": "Do not allow all requests.", - "long_description": "Setting the admission control plugin `AlwaysAdmit` allows all requests and does not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. Its behavior was equivalent to turning off all admission controllers.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set" - ], - "baseScore": 8, - "impact_statement": "Only requests explicitly allowed by the admission control plugins would be served.", - "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Resources memory limit and request", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ], - "actionRequired": "configuration" - }, - "description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use the exception mechanism to avoid unnecessary notifications.", - "rulesNames": [ - "resources-memory-limit-and-request" - ], - "controlID": "C-0004", - "example": "@controls/examples/c004.yaml", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0204", - "name": "Minimize the admission of containers which use HostPorts", - "description": "Do not generally permit containers which require the use of HostPorts.", - "long_description": "Host ports connect containers directly to the host's network. 
This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-baseline-applied-1", - "pod-security-admission-baseline-applied-2" - ], - "baseScore": 4, - "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the use of HostPorts.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "rulesNames": [ - "enforce-kubelet-client-tls-authentication-updated" - ], - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0102", - "name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. 
It should not be readable or writable by any group members or the world.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" - ], - "rulesNames": [ - "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory has permissions of `755`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "rulesNames": [ - "exposed-sensitive-interfaces-v1" - ], - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", - "controlID": "C-0021", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0103", - "name": "Ensure that the etcd data directory ownership is set to etcd:etcd", - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd" - ], - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0163", - "name": "Ensure that the kubelet service file ownership is set to root:root", - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "rulesNames": [ - "exec-into-container-v1" - ], - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0213", - "name": "Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" - ], - "attributes": {}, - "rulesNames": [ - "psp-deny-privileged-container" - ], - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. 
The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Workload with secret access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Secret Access" - ] - } - ] - }, - "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "rulesNames": [ - "workload-mounted-secrets" - ], - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", - "controlID": "C-0255", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0113", - "name": "Ensure that the API Server --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the API server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. 
However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false" - ], - "baseScore": 8, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0240", - "name": "Ensure Network Policy is Enabled and set as appropriate", - "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", - "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": {}, - "rulesNames": [ - "rule-cni-enabled-aks" - ], - "baseScore": 6, - "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0153", - "name": "Ensure that the --cert-file and --key-file arguments are set as appropriate", - "description": "Configure TLS encryption for the etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" - ], - "attributes": {}, - "rulesNames": [ - "etcd-tls-enabled" - ], - "baseScore": 8, - "impact_statement": "Client connections only over TLS would be served.", - "default_value": "By default, TLS encryption is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "rulesNames": [ - "ingress-and-egress-blocked" - ], - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. 
Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "rulesNames": [ - "rule-can-portforward-v1" - ], - "long_description": "Attackers who have relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside the target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "rulesNames": [ - "nginx-ingress-snippet-annotation-vulnerability" - ], - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure memory requests are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the memory requests are not set.", - "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", - "rulesNames": [ - "resources-memory-requests" - ], - "controlID": "C-0269", - "baseScore": 3.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0254", - "name": "Enable audit Logs", - "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. 
To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", - "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", - "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "rulesNames": [ - "rule-manual" - ], - "baseScore": 5, - "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. 
All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", - "default_value": "By default, cluster control plane logs aren't sent to be Logged.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0197", - "name": "Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-restricted-applied-1", - "pod-security-admission-restricted-applied-2" - ], - "baseScore": 6, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0133", - "name": "Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Validate admission controller (validating)", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. 
Use exception mechanism to prevent repetitive notifications.", - "rulesNames": [ - "list-all-validating-webhooks" - ], - "controlID": "C-0036", - "baseScore": 3.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0248", - "name": "Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "ensure-clusters-are-created-with-private-nodes" - ], - "baseScore": 8, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "Pods in default namespace", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running pods in the cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", - "remediation": "Create necessary namespaces and move all the pods from default namespace there.", - "rulesNames": [ - "pods-in-default-namespace" - ], - "long_description": "It is recommended to avoid running pods in the cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the pods running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0196", - "name": "Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-baseline-applied-1", - "pod-security-admission-baseline-applied-2" - ], - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "rulesNames": [ - "alert-rw-hostpath" - ], - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0180", - "name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-event-qps" - ], - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "By default, `--event-qps` argument is set to `5`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0217", - "name": "Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "rulesNames": [ - "psp-deny-allowprivilegeescalation" - ], - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "rulesNames": [ - "insecure-capabilities" - ], - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0130", - "name": "Ensure that the API Server --audit-log-path argument is set", - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-audit-log-path-argument-is-set" - ], - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0127", - "name": "Ensure that the admission control plugin NodeRestriction is set", - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-NodeRestriction-is-set" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `NodeRestriction` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Ensure that the seccomp profile is set to docker/default in your pod definitions", - "controlID": "C-0210", - "description": "Enable `docker/default` seccomp profile in your pod definitions.", - "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", - "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "manual_test": "Review the pod definitions in your cluster. 
It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" - ], - "attributes": {}, - "rulesNames": [ - "set-seccomp-profile-RuntimeDefault" - ], - "baseScore": 4, - "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", - "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0176", - "name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-streaming-connection-idle-timeout" - ], - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0110", - "name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" - ], - "rulesNames": [ - "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-3172-aggregated-API-server-redirect", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [] - }, - "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "rulesNames": [ - "CVE-2022-3172" - ], - "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", - "controlID": "C-0089", - "baseScore": 3.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Outdated Kubernetes version", - "attributes": {}, - "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", - "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", - "rulesNames": [ - "outdated-k8s-version" - ], - "long_description": "Running an outdated version of Kubernetes poses significant security risks and operational challenges. Older versions may contain unpatched vulnerabilities, leading to potential security breaches and unauthorized access. Additionally, outdated clusters might not support newer, more secure, and efficient features, impacting both performance and security. 
Regularly updating Kubernetes ensures compliance with the latest security standards and access to enhanced functionalities.", - "test": "Verifies the current Kubernetes version against the latest stable releases.", - "controlID": "C-0273", - "baseScore": 2.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CoreDNS poisoning", - "attributes": { - "microsoftMitreColumns": [ - "Lateral Movement" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", - "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", - "rulesNames": [ - "rule-can-update-configmap-v1" - ], - "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", - "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", - "controlID": "C-0037", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows the user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "rulesNames": [ - "container-image-repository", - "container-image-repository-v1" - ], - "long_description": "If attackers get access to the cluster, they can re-point Kubernetes to a compromised container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. 
User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0225", - "name": "Prefer using dedicated EKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", - "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" - ], - "attributes": {}, - "rulesNames": [ - "ensure-default-service-accounts-has-only-default-roles", - "automount-default-service-account" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0123", - "name": "Ensure that the admission control plugin AlwaysPullImages is set", - "description": "Always pull images.", - "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set" - ], - "baseScore": 4, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
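As a hedged illustration of the manual test quoted above for `AlwaysPullImages`, a minimal sketch (assuming a kubeadm-style control plane where `kube-apiserver` runs as a static pod) might look like:

```
# Check whether the running kube-apiserver enables the AlwaysPullImages admission plugin.
# Assumption: kubeadm-style control plane; adjust for managed or packaged distributions.
ps -ef | grep '[k]ube-apiserver' | tr ' ' '\n' \
  | grep -E '^--enable-admission-plugins=' \
  | grep -q 'AlwaysPullImages' \
  && echo "AlwaysPullImages is enabled" \
  || echo "AlwaysPullImages is NOT enabled"
```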
This setting is not appropriate for clusters which use this configuration.", - "default_value": "By default, `AlwaysPullImages` is not set.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0227", - "name": "Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. 
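Before applying the `update-cluster-config` command shown above, it can help to review the current endpoint exposure first; a minimal sketch using the AWS CLI (cluster name and region are placeholders) could be:

```
# Inspect the current API endpoint exposure of an EKS cluster (placeholders: $CLUSTER_NAME, $AWS_REGION).
aws eks describe-cluster \
  --name "$CLUSTER_NAME" \
  --region "$AWS_REGION" \
  --query 'cluster.resourcesVpcConfig.{private:endpointPrivateAccess,public:endpointPublicAccess,cidrs:publicAccessCidrs}'
```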
For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": {}, - "rulesNames": [ - "ensure-endpointprivateaccess-is-enabled" - ], - "baseScore": 8.0, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", - "default_value": "By default, Endpoint Public Access is disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0198", - "name": "Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-restricted-applied-1", - "pod-security-admission-restricted-applied-2" - ], - "baseScore": 6, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0119", - "name": "Ensure that the API Server --authorization-mode argument includes Node", - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", - "references": [ - 
"https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-authorization-mode-argument-includes-Node" - ], - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, `Node` authorization is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0251", - "name": "Minimize user access to Azure Container Registry (ACR)", - "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "list-role-definitions-in-acr" - ], - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "rulesNames": [ - "container-hostPort" - ], - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0238", - "name": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": {}, - "rulesNames": [ - "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive" - ], - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0118", - "name": "Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not always authorize all requests.", - "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow" - ], - "baseScore": 7, - "impact_statement": "Only authorized requests will be served.", - "default_value": "By default, `AlwaysAllow` is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0233", - "name": "Consider Fargate for running untrusted workloads", - "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", - "long_description": "", - "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * For Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. 
For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" - ], - "attributes": {}, - "rulesNames": [ - "alert-fargate-not-in-use" - ], - "baseScore": 3, - "impact_statement": "", - "default_value": "By default, AWS Fargate is not utilized.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Instance Metadata API", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "rulesNames": [ - "instance-metadata-api-access" - ], - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "rulesNames": [ - "rule-list-all-cluster-admins-v1" - ], - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
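As a rough manual complement to this check (not the rule's own logic), the subjects bound to the built-in `cluster-admin` ClusterRole can be listed with kubectl and jq:

```
# List subjects bound to the cluster-admin ClusterRole (requires kubectl and jq).
kubectl get clusterrolebindings -o json \
  | jq -r '.items[] | select(.roleRef.name == "cluster-admin")
           | .subjects[]? | "\(.kind)\t\(.name)"'
```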
", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0136", - "name": "Ensure that the API Server --service-account-key-file argument is set as appropriate", - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate" - ], - "baseScore": 5, - "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-key-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0115", - "name": "Ensure that the API Server --DenyServiceExternalIPs is not set", - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set" - ], - "baseScore": 4, - "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", - "default_value": "By default, the `DenyServiceExternalIPs` admission plugin is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workload with ConfigMap access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Collection" - ] - } - ] - }, - "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "rulesNames": [ - "workload-mounted-configmap" - ], - "test": "Check if any workload has mounted ConfigMaps by inspecting their specifications and verifying if ConfigMap volumes are defined", - "controlID": "C-0258", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0245", - "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "encrypt-traffic-to-https-load-balancers-with-tls-certificates" - ], - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0235", - "name": "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive" - ], - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0144", - "name": "Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", - "description": "Activate garbage collector on pod termination, as appropriate.", - "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. 
Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0107", - "name": "Ensure that the scheduler.conf file ownership is set to root:root", - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" - ], - "rulesNames": [ - "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" - ], - "attributes": {}, - "rulesNames": [ - "pods-in-default-namespace", - "rolebinding-in-default-namespace", - "role-in-default-namespace", - "configmap-in-default-namespace", - "endpoints-in-default-namespace", - "persistentvolumeclaim-in-default-namespace", - "podtemplate-in-default-namespace", - "replicationcontroller-in-default-namespace", - "service-in-default-namespace", - "serviceaccount-in-default-namespace", - "endpointslice-in-default-namespace", - "horizontalpodautoscaler-in-default-namespace", - "lease-in-default-namespace", - "csistoragecapacity-in-default-namespace", - "ingress-in-default-namespace", - "poddisruptionbudget-in-default-namespace", - "resources-secret-in-default-namespace" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Forbidden Container Registries", - "attributes": { - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "actionRequired": "configuration" - }, - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), a compromised cloud credential can lead to cluster takeover. Attackers may abuse cloud account credentials or the IAM mechanism to gain access to the cluster\u2019s management layer.", - "remediation": "Limit the registries from which you pull container images", - "rulesNames": [ - "rule-identify-blocklisted-image-registries", - "rule-identify-blocklisted-image-registries-v1" - ], - "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. 
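A quick manual spot-check, separate from the rules named above, is to list every image currently running and compare it against the registries you trust; a minimal sketch assuming kubectl access:

```
# List the unique container images in use across all namespaces for comparison against trusted registries.
kubectl get pods -A -o jsonpath='{.items[*].spec.containers[*].image}' \
  | tr ' ' '\n' | sort -u
```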
Building images based on untrusted base images can also lead to similar results.", - "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", - "controlID": "C-0001", - "baseScore": 7.0, - "example": "@controls/examples/c001.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0262", - "name": "Anonymous user has RoleBinding", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": {}, - "rulesNames": [ - "anonymous-access-enabled" - ], - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0214", - "name": "Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "rulesNames": [ - "psp-deny-hostpid" - ], - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0128", - "name": "Ensure that the API Server --secure-port argument is not set to 0", - "description": "Do not disable the secure port.", - "long_description": "The secure port is used to serve https with authentication and authorization. 
If you disable it, no https traffic is served and all traffic is served unencrypted.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0" - ], - "baseScore": 8, - "impact_statement": "You need to set the API Server up with the right TLS certificates.", - "default_value": "By default, port 6443 is used as the secure port.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0093", - "name": "Ensure that the API server pod specification file ownership is set to root:root", - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" - ], - "rulesNames": [ - "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Naked pods", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of pods may lead to configuration drift and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", - "remediation": "Create the necessary Deployment object for every pod, making every pod a first-class citizen in your IaC architecture.", - "rulesNames": [ - "naked-pods" - ], - "long_description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of pods may lead to configuration drift and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. 
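A rough manual approximation of this control (not the `naked-pods` rule itself) is to flag pods that carry no ownerReferences, assuming kubectl and jq are available:

```
# Find "naked" pods: pods with no ownerReferences (not managed by a Deployment, ReplicaSet, Job, etc.).
kubectl get pods -A -o json \
  | jq -r '.items[] | select(.metadata.ownerReferences == null)
           | "\(.metadata.namespace)/\(.metadata.name)"'
```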
This control identifies every pod that does not have corresponding parental object.", - "test": "Test if pods are not associated with Deployment, ReplicaSet etc. If not, fail.", - "controlID": "C-0073", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0104", - "name": "Ensure that the admin.conf file permissions are set to 600", - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" - ], - "rulesNames": [ - "ensure-that-the-admin.conf-file-permissions-are-set-to-600" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, admin.conf has permissions of `600`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "rulesNames": [ - "anonymous-requests-to-kubelet-service-updated" - ], - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. 
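A minimal sketch of that check on a single node follows; the config file path is an assumption (a common kubeadm default) and varies by distribution:

```
# On a worker node: check how anonymous kubelet access is configured.
# Assumption: kubelet config lives at /var/lib/kubelet/config.yaml (kubeadm default).
ps -ef | grep '[k]ubelet' | tr ' ' '\n' | grep -- '--anonymous-auth' || true
grep -A2 'anonymous:' /var/lib/kubelet/config.yaml
```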
If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Ensure that default service accounts are not actively used", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" - ], - "attributes": {}, - "rulesNames": [ - "automount-default-service-account", - "namespace-without-service-account" - ], - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0109", - "name": "Ensure that the controller-manager.conf file ownership is set to root:root", - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" - ], - "rulesNames": [ - "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0229", - "name": "Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", - "manual_test": "", - "references": [], - "attributes": {}, - "rulesNames": [ - "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks" - ], - "baseScore": 8.0, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" - ], - "attributes": {}, - "rulesNames": [ - "rule-can-create-pod" - ], - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller 
clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0173", - "name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-authorization-mode-alwaysAllow" - ], - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0141", - "name": "Ensure that the API Server --encryption-provider-config argument is set as appropriate", - "description": "Encrypt etcd key-value store.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate" - ], - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `--encryption-provider-config` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0161", - "name": "Ensure that the audit policy covers key security concerns", - "description": "Ensure that the audit policy created for the cluster covers key security concerns.", - "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", - "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", - "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas :-\n\n * Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" - ], - "attributes": {}, - "rulesNames": [ - "audit-policy-content" - ], - "baseScore": 5, - "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", - "default_value": "By default Kubernetes clusters do not log audit information.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0234", - "name": "Consider external secret storage", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "manual_test": "Review your secrets management implementation.", - "references": [], - "attributes": {}, - "rulesNames": [ - "ensure-external-secrets-storage-is-in-use" - ], - "baseScore": 6.0, - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0100", - "name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0203", - "name": "Minimize the admission of HostPath volumes", - "description": "Do not generally admit containers which make use of `hostPath` volumes.", - "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. 
The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-baseline-applied-1", - "pod-security-admission-baseline-applied-2" - ], - "baseScore": 6, - "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0157", - "name": "Ensure that the --peer-client-cert-auth argument is set to true", - "description": "etcd should be configured for peer authentication.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", - "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" - ], - "attributes": {}, - "rulesNames": [ - "etcd-peer-client-auth-cert" - ], - "baseScore": 7, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. 
By default, `--peer-client-cert-auth` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0094", - "name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" - ], - "rulesNames": [ - "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0181", - "name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the Kubelets.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" - ], - "attributes": {}, - "rulesNames": [ - "validate-kubelet-tls-configuration-updated" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0183", - "name": "Verify that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-rotate-kubelet-server-certificate" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet server certificate rotation is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0168", - "name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive", - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `644` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" - ], - "rulesNames": [ - "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0193", - "name": "Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-baseline-applied-1", - "pod-security-admission-baseline-applied-2" - ], - "baseScore": 8, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of privileged containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "rulesNames": [ - "rule-allow-privilege-escalation" - ], - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. 
", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "rulesNames": [ - "CVE-2022-23648" - ], - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", - "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Exposure to internet", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "service-destruction", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-database-without-authentication", - "categories": [ - "Initial Access" - ] - } - ] - }, - "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. It fails in case it find workloads connected with these resources.", - "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", - "rulesNames": [ - "exposure-to-internet" - ], - "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", - "controlID": "C-0256", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0099", - "name": "Ensure that the etcd pod specification file ownership is set to root:root", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" - ], - "rulesNames": [ - "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0241", - "name": "Use Azure RBAC for Kubernetes Authorization.", - "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", - "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources. With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", - "remediation": "Set Azure RBAC as the access system.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "ensure-azure-rbac-is-set" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "rulesNames": [ - "rule-deny-cronjobs" - ], - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in the cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0165", - "name": "If proxy kubeconfig file exists ensure ownership is set to root:root", - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" - ], - "rulesNames": [ - "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `proxy` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0121", - "name": "Ensure that the admission control plugin EventRateLimit is set", - "description": "Limit the rate at which the API server accepts requests.", - "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. 
Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-EventRateLimit-is-set" - ], - "baseScore": 4, - "impact_statement": "You need to carefully tune the limits as per your environment.", - "default_value": "By default, `EventRateLimit` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0095", - "name": "Ensure that the controller manager pod specification file ownership is set to root:root", - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" - ], - "rulesNames": [ - "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", - "rulesNames": [ - "linux-hardening" - ], - "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. 
", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0182", - "name": "Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also require the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", - "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-rotate-certificates" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet client certificate rotation is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. 
This control will identify all pods with latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", - "rulesNames": [ - "image-pull-policy-is-not-set-to-always" - ], - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", - "test": "If imagePullPolicy = always pass, else fail.", - "controlID": "C-0075", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0220", - "name": "Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": {}, - "rulesNames": [ - "psp-required-drop-capabilities" - ], - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0155", - "name": "Ensure that the --auto-tls argument is not set to true", - "description": "Do not use self-signed certificates for TLS.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" - ], - "attributes": {}, - "rulesNames": [ - "etcd-auto-tls-disabled" - ], - "baseScore": 6, - "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", - "default_value": "By default, `--auto-tls` is set to `false`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Missing network policy", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "This control detects workloads that have no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have the necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "rulesNames": [ - "ensure_network_policy_configured_in_labels" - ], - "test": "Check that all workloads have a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ingress uses TLS", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control detects Ingress resources that do not use TLS", - "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", - "rulesNames": [ - "ingress-no-tls" - ], - "test": "Check if the Ingress resource has TLS configured", - "controlID": "C-0263", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0162", - "name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kubelet` service file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0228", - "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "Check for private endpoint access to the Kubernetes API server", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": {}, - "rulesNames": [ - "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks" - ], - "baseScore": 8.0, - "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", - "default_value": "By default, the Public Endpoint is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0201", - "name": "Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-restricted-applied-1", - "pod-security-admission-restricted-applied-2" - ], - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0247", - "name": "Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. 
Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "restrict-access-to-the-control-plane-endpoint" - ], - "baseScore": 8, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", - "default_value": "By default, Endpoint Private Access is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0131", - "name": "Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", - "description": "Retain the logs for at least 30 days or as appropriate.", - "long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "SSH server running inside container", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "rulesNames": [ - "rule-can-ssh-to-pod-v1" - ], - "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if service connected to some workload has an SSH port (22/2222). 
If so we raise an alert. ", - "controlID": "C-0042", - "baseScore": 3.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0184", - "name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-strong-cryptographics-ciphers" - ], - "baseScore": 5, - "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. 
When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields are set according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "rulesNames": [ - "rule-privilege-escalation", - "immutable-container-filesystem", - "non-root-containers", - "drop-capability-netraw", - "set-seLinuxOptions", - "set-seccomp-profile", - "set-procmount-default", - "set-fsgroup-value", - "set-fsgroupchangepolicy-value", - "set-sysctls-params", - "set-supplementalgroups-values", - "rule-allow-privilege-escalation" - ], - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "rulesNames": [ - "k8s-audit-logs-enabled-cloud", - "k8s-audit-logs-enabled-native" - ], - "long_description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events that happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd; therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster; for more information, see the vendor documentation.", - "rulesNames": [ - "secret-etcd-encryption-cloud", - "etcd-encryption-native" - ], - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0265", - "name": "system:authenticated user has elevated roles", - "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have permissions that put the cluster at risk.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that the system:authenticated group has minimal permissions.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to the system:authenticated group.", - "attributes": {}, - "rulesNames": [ - "system-authenticated-allowed-to-take-over-cluster" - ], - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0244", - "name": "Ensure Kubernetes Secrets are encrypted", - "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. 
Additionally, organizations have various options to closely manage encryption or encryption keys.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "secret-etcd-encryption-cloud" - ], - "baseScore": 6, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "rulesNames": [ - "insecure-port-flag" - ], - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0179", - "name": "Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", - "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-hostname-override" - ], - "baseScore": 3, - "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. 
In these environments, this recommendation should not apply.", - "default_value": "By default, `--hostname-override` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0140", - "name": "Ensure that the API Server --etcd-cafile argument is set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-cafile` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Roles with delete capabilities", - "attributes": { - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "rulesNames": [ - "rule-excessive-delete-rights-v1" - ], - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0221", - "name": "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", - "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. 
Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "To utilize AWS ECR for Image scanning, please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console:\n\n 1. Open the Amazon ECR console.\n2. From the navigation bar, choose the Region to create your repository in.\n3. In the navigation pane, choose Repositories.\n4. On the Repositories page, choose the repository that contains the image to scan.\n5. On the Images page, select the image to scan and then choose Scan.", - "manual_test": "Please follow AWS ECR or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" - ], - "attributes": {}, - "rulesNames": [ - "ensure-image-scanning-enabled-cloud" - ], - "baseScore": 5, - "impact_statement": "If you are utilizing AWS ECR, the following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageError: You may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returned: You may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", - "default_value": "Images are not scanned by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0222", - "name": "Minimize user access to Amazon ECR", - "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. 
To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. 
You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" - ], - "attributes": {}, - "rulesNames": [ - "ensure-aws-policies-are-present" - ], - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0147", - "name": "Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate" - ], - "baseScore": 6, - "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-private-key-file` it not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-39328-grafana-auth-bypass", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", - "remediation": "Update your Grafana to 9.2.4 or above", - "rulesNames": [ - "CVE-2022-39328" - ], - "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. 
The CVSS score for this vulnerability is 9.8 Critical.", - "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", - "controlID": "C-0090", - "baseScore": 9.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0195", - "name": "Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", - "manual_test": "List the policies in use for each namespace in the cluster and ensure that each policy disallows the admission of `hostIPC` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-baseline-applied-1", - "pod-security-admission-baseline-applied-2" - ], - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0120", - "name": "Ensure that the API Server --authorization-mode argument includes RBAC", - "description": "Turn on Role Based Access Control.", - "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. 
It is recommended to use the RBAC authorization mode.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC" - ], - "baseScore": 8, - "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", - "default_value": "By default, `RBAC` authorization is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" - ], - "attributes": {}, - "rulesNames": [ - "rule-can-bind-escalate", - "rule-can-impersonate-users-groups-v1" - ], - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0205", - "name": "Ensure that the CNI in use supports Network Policies", - "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-cni-in-use-supports-network-policies" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "This will depend on the CNI plugin in use.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0232", - "name": "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", - "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", - "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", - "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", - "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" - ], - "attributes": {}, - "rulesNames": [ - "review-roles-with-aws-iam-authenticator" - ], - "baseScore": 7, - "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not they will not be able to access the namespace or deploy.", - "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0246", - "name": "Avoid use of system:masters group", - "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", - "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", - "remediation": "Remove the `system:masters` group from all users in the cluster.", - "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", - "references": [ - "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" - ], - "attributes": {}, - "rulesNames": [ - "rule-manual" - ], - "baseScore": 8, - "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", - "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "rulesNames": [ - "host-network-access" - ], - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. 
Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" - ], - "attributes": {}, - "rulesNames": [ - "external-secret-storage" - ], - "baseScore": 5, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "rulesNames": [ - "immutable-container-filesystem" - ], - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. 
When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" - ], - "attributes": {}, - "rulesNames": [ - "cluster-admin-role" - ], - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0172", - "name": "Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" - ], - "attributes": {}, - "rulesNames": [ - "anonymous-requests-to-kubelet-service-updated" - ], - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0218", - "name": "Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" - ], - "attributes": {}, - "rulesNames": [ - "psp-deny-root-container" - ], - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "rulesNames": [ - "k8s-common-labels-usage" - ], - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if labels that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0200", - "name": "Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", - "manual_test": "List the policies in use for each namespace in the cluster and ensure that policies are present which prevent `allowedCapabilities` from being set to anything other than an empty array.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-restricted-applied-1", - "pod-security-admission-restricted-applied-2" - ], - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, there are no restrictions on adding capabilities to containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure CPU requests are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the CPU requests are not set.", - "remediation": "Set the CPU requests or use the exception mechanism to avoid unnecessary notifications.", - "rulesNames": [ - "resources-cpu-requests" - ], - "controlID": "C-0268", - "baseScore": 3.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0174", - "name": "Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used 
for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" - ], - "attributes": {}, - "rulesNames": [ - "enforce-kubelet-client-tls-authentication-updated" - ], - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0242", - "name": "Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "rule-hostile-multitenant-workloads" - ], - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "ServiceAccount token mounted", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. 
Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", - "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", - "rulesNames": [ - "serviceaccount-token-mount" - ], - "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", - "controlID": "C-0261", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0098", - "name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" - ], - "rulesNames": [ - "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0135", - "name": "Ensure that the API Server --service-account-lookup argument is set to true", - "description": "Validate service account before validating token.", - "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. 
This is an example of a time-of-check to time-of-use security issue.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists, it is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `--service-account-lookup` argument is set to `true`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0171", - "name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be owned by root:root.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" - ], - "rulesNames": [ - "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0170", - "name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" - ], - "rulesNames": [ - "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0175", - "name": "Verify that the --read-only-port argument is set to 0", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" - ], - "attributes": {}, - "rulesNames": [ - "read-only-port-enabled-updated" - ], - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "rulesNames": [ - "sudo-in-container-entrypoint" - ], - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
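For the kubelet config-file controls above (C-0170, C-0171, C-0175), the relevant settings live in the file passed to the kubelet via `--config`. A minimal illustrative KubeletConfiguration sketch, not part of this patch (the file path is an assumption from the control text):

```yaml
# /var/lib/kubelet/config.yaml -- illustrative only; keep it owned by root:root with mode 600
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
readOnlyPort: 0   # disables the unauthenticated read-only API checked by C-0175
```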
This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "rulesNames": [ - "automount-service-account" - ], - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0154", - "name": "Ensure that the --client-cert-auth argument is set to true", - "description": "Enable client authentication on etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" - ], - "attributes": {}, - "rulesNames": [ - "etcd-client-auth-cert" - ], - "baseScore": 8, - "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", - "default_value": "By default, the etcd service can be queried by unauthenticated clients.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0101", - "name": "Ensure that the Container Network Interface file ownership is set to root:root", - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. 
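A minimal illustrative sketch for C-0034 above (and the related C-0190 later in this file): disabling token automount at both the ServiceAccount and the pod level. Resource names and the image are hypothetical and not taken from this patch:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                        # hypothetical name
automountServiceAccountToken: false   # account-level default
---
apiVersion: v1
kind: Pod
metadata:
  name: app-pod                       # hypothetical name
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false # pod-level setting takes precedence over the account
  containers:
    - name: app
      image: nginx:1.25               # hypothetical image
```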
Those files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Resource limits", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/pod manifests.", - "rulesNames": [ - "resource-policies" - ], - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0117", - "name": "Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", - "description": "Verify kubelet's certificate before establishing connection.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
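An illustrative container spec for C-0009 (Resource limits) above; the numbers are placeholder values rather than recommendations from this patch:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: limited-app                   # hypothetical name
spec:
  containers:
    - name: app
      image: nginx:1.25               # hypothetical image
      resources:
        requests:
          cpu: 250m
          memory: 128Mi
        limits:                       # the control checks that both cpu and memory limits exist
          cpu: 500m
          memory: 256Mi
```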
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" - ], - "attributes": {}, - "rulesNames": [ - "internal-networking" - ], - "baseScore": 4, - "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", - "default_value": "By default, network policies are not created.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0252", - "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network.
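A common starting point for C-0206 above (and C-0054 further down) is a default-deny NetworkPolicy in each namespace, then explicit allow rules on top. This sketch is illustrative; the namespace name is a placeholder:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-namespace             # placeholder namespace
spec:
  podSelector: {}                     # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress                          # all traffic denied until allow policies are added
```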
Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": {}, - "rulesNames": [ - "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled" - ], - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0137", - "name": "Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0178", - "name": "Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration.
Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-ip-tables" - ], - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0166", - "name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" - ], - "rulesNames": [ - "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file has permissions of `600`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workloads with Critical vulnerabilities exposed to external traffic", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic.
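In the same spirit as the read-only-port sketch earlier, C-0178 can be satisfied from the kubelet config file rather than command-line flags. An illustrative fragment, with the file path assumed:

```yaml
# /var/lib/kubelet/config.yaml -- illustrative fragment only
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
makeIPTablesUtilChains: true   # let the kubelet keep its iptables chains in sync (C-0178)
```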
This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service is assigned to them.", - "remediation": "Either update the container image to fix the vulnerabilities (if such a fix is available) or reassess if this workload must be exposed to the outside traffic. If no fix is available, consider periodic restart of the pod to minimize the risk of persistent intrusion. Use exception mechanism if you don't want to see this report again.", - "rulesNames": [ - "exposed-critical-pods" - ], - "long_description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service is assigned to them.", - "test": "This control enumerates external facing workloads, that have LoadBalancer or NodePort services and checks image vulnerability information to see if the image has critical vulnerabilities.", - "controlID": "C-0083", - "baseScore": 8.0, - "example": "@controls/examples/c83.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0146", - "name": "Ensure that the Controller Manager --use-service-account-credentials argument is set to true", - "description": "Use individual service account credentials for each controller.", - "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true" - ], - "baseScore": 4, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.
If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.", - "default_value": "By default, `--use-service-account-credentials` is set to false.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0192", - "name": "Ensure that the cluster has at least one active policy control mechanism in place", - "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", - "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", - "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", - "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-applied-1", - "pod-security-admission-applied-2" - ], - "baseScore": 4, - "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.", - "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with credential access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "This control checks if workloads specifications have sensitive information in their environment variables.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "rulesNames": [ - "rule-credentials-in-env-var" - ], - "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", - "controlID": "C-0259", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0148", - "name": "Ensure that the Controller Manager --root-ca-file argument is set as appropriate", - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could be a subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate" - ], - "baseScore": 7, - "impact_statement": "You need to setup and maintain root certificate authority file.", - "default_value": "By default, `--root-ca-file` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0239", - "name": "Prefer using dedicated AKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. 
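For C-0192 above, the built-in policy control mechanism is Pod Security Admission, driven by namespace labels. A hedged sketch follows; the namespace name and the chosen levels are placeholders, not values mandated by this patch:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: my-namespace                               # placeholder namespace
  labels:
    pod-security.kubernetes.io/enforce: baseline   # reject pods that violate the baseline profile
    pod-security.kubernetes.io/warn: restricted    # warn on anything short of restricted
```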
Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. 
Webhook token authentication is configured and managed as part of the AKS cluster.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "ensure-default-service-accounts-has-only-default-roles" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0236", - "name": "Verify image signature", - "description": "Verifies the signature of each image with given public keys", - "long_description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "manual_test": "", - "references": [], - "attributes": { - "actionRequired": "configuration" - }, - "rulesNames": [ - "verify-image-signature" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0215", - "name": "Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement for containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostIPC is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "rulesNames": [ - "psp-deny-hostipc" - ], - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "RBAC enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "rulesNames": [ - "rbac-enabled-cloud", - "rbac-enabled-native" - ], - "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088",
- "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workloads with excessive amount of vulnerabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Container images with multiple Critical and High sevirity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threashold provided by the customer.", - "remediation": "Update your workload images as soon as possible when fixes become available.", - "rulesNames": [ - "excessive_amount_of_vulnerabilities_pods" - ], - "long_description": "Container images with multiple Critical and High sevirity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threashold provided by the customer.", - "test": "This control enumerates workloads and checks if they have excessive amount of vulnerabilities in their container images. The threshold of \u201cexcessive number\u201d is configurable.", - "controlID": "C-0085", - "baseScore": 6.0, - "example": "@controls/examples/c85.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0243", - "name": "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "rulesNames": [ - "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider" - ], - "baseScore": 5, - "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. 
Or, a network issue might prevent you from connecting to the registry.", - "default_value": "Images are not scanned by Default.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0096", - "name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" - ], - "rulesNames": [ - "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workload with PVC access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Collection" - ] - } - ] - }, - "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "rulesNames": [ - "workload-mounted-pvc" - ], - "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", - "controlID": "C-0257", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "rules": [] - }, - { - "controlID": "C-0194", - "name": "Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-baseline-applied-1", - "pod-security-admission-baseline-applied-2" - ], - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" - ], - "attributes": {}, - "rulesNames": [ - "automount-service-account" - ], - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "rulesNames": [ - "host-pid-ipc-privileges" - ], - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0116", - "name": "Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", - "description": "Enable certificate based kubelet authentication.", - "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate" - ], - "baseScore": 7, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, certificate-based kubelet authentication is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Validate admission controller (mutating)", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. 
Use the exception mechanism to prevent repetitive notifications.", - "rulesNames": [ - "list-all-mutating-webhooks" - ], - "controlID": "C-0039", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0219", - "name": "Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": {}, - "rulesNames": [ - "psp-deny-allowed-capabilities" - ], - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined. If a PSP is created 'allowedCapabilities' is set by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0125", - "name": "Ensure that the admission control plugin ServiceAccount is set", - "description": "Automate service accounts management.", - "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment.
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-ServiceAccount-is-set" - ], - "baseScore": 3, - "impact_statement": "None.", - "default_value": "By default, `ServiceAccount` is set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "rulesNames": [ - "internal-networking" - ], - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with administrative roles", - "attributes": {}, - "description": "This control identifies workloads where the associated service accounts have roles that grant administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", - "remediation": "You should apply the least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", - "rulesNames": [ - "workload-with-administrative-roles" - ], - "long_description": "In Kubernetes environments, workloads granted administrative-level privileges without restrictions represent a critical security vulnerability. When a service account associated with a workload is configured with permissions to perform any action on any resource, it essentially holds unrestricted access within the cluster, akin to cluster admin privileges. This configuration dramatically increases the risk of security breaches, including data theft, unauthorized modifications, and potentially full cluster takeovers. Such privileges allow attackers to exploit the workload for wide-ranging malicious activities, bypassing the principle of least privilege.
Therefore, it's essential to follow the least privilege principle and make sure cluster admin permissions are granted only when it is absolutely necessary.", - "test": "Check if the service account used by a workload has cluster admin roles, either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges.", - "controlID": "C-0272", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0249", - "name": "Restrict untrusted workloads", - "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", - "long_description": "It is a best practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "actionRequired": "manual review" - }, - "rulesNames": [ - "rule-manual" - ], - "baseScore": 5, - "impact_statement": "", - "default_value": "ACI is not a default component of the AKS", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host.
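To follow the least-privilege guidance in C-0272 above, a workload's service account can be bound to a narrowly scoped Role instead of cluster-admin. An illustrative sketch with hypothetical names, not part of this patch:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: app-reader                    # hypothetical role scoped to one namespace
  namespace: my-namespace
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]            # only what the workload actually needs
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: app-reader-binding
  namespace: my-namespace
subjects:
  - kind: ServiceAccount
    name: app-sa                      # hypothetical service account used by the workload
    namespace: my-namespace
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: app-reader
```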
This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "rulesNames": [ - "alert-any-hostpath" - ], - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0114", - "name": "Ensure that the API Server --token-auth-file parameter is not set", - "description": "Do not use token based authentication.", - "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-token-auth-file-parameter-is-not-set" - ], - "baseScore": 8, - "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", - "default_value": "By default, `--token-auth-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "rulesNames": [ - "psp-enabled-cloud", - "psp-enabled-native" - ], - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0143", - "name": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers" - ], - "baseScore": 5, - "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9, v2.2.4 or v2.3.0)", - "rulesNames": [ - "CVE-2022-24348" - ], - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform.
This in turn can lead to privilege escalation, lateral movement and information disclosure.", - "test": "Checks the version of the Argo CD deployment; if it is lower than the fixed versions (v2.1.9, v2.2.4 or v2.3.0) it fires an alert", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can allow attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "rulesNames": [ - "CVE-2022-0185" - ], - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 and below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0250", - "name": "Minimize cluster access to read-only for Azure Container Registry (ACR)", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", - "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "rulesNames": [ - "ensure-service-principle-has-read-only-permissions" - ], - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno. It enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Kyverno deployment to 1.8.5 or above", - "rulesNames": [ - "CVE-2022-47633" - ], - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno. It enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images.
The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Configured readiness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that the workload is ready to process network traffic. It is highly recommended to define a readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "rulesNames": [ - "configured-readiness-probe" - ], - "long_description": "Readiness probe is intended to ensure that the workload is ready to process network traffic. It is highly recommended to define a readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container runtime socket mounted", - "attributes": { - "controlTypeTags": [ - "devops", - "smartRemediation" - ] - }, - "description": "Mounting the container runtime socket (Unix socket) enables a container to access the container runtime, retrieve sensitive information and execute commands, if the container runtime is available. This control identifies pods that attempt to mount the container runtime socket for accessing the container runtime.", - "remediation": "Remove the container runtime socket mount request or define an exception.", - "rulesNames": [ - "containers-mounting-docker-socket" - ], - "long_description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies pods that attempt to mount the Docker socket for accessing the Docker runtime.", - "test": "Check hostpath. If the path is set to one of the container runtime sockets, the container has access to the container runtime - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0138", - "name": "Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Set up TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
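Illustrative sketch only (not part of the controls JSON): a minimal example of the readiness probe that C-0018 looks for; the pod name, image and HTTP path are hypothetical placeholders.

```
apiVersion: v1
kind: Pod
metadata:
  name: web
spec:
  containers:
  - name: web
    image: nginx            # placeholder image
    ports:
    - containerPort: 80
    readinessProbe:         # C-0018: traffic is only routed to the pod once this probe succeeds
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10
```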
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0164", - "name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions of `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" - ], - "rulesNames": [ - "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, proxy file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0149", - "name": "Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation on controller-manager.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire.
This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0177", - "name": "Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service.
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" - ], - "attributes": {}, - "rulesNames": [ - "kubelet-protect-kernel-defaults" - ], - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "By default, `--protect-kernel-defaults` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0150", - "name": "Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1" - ], - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Mount service principal", - "attributes": { - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credentials.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from mounting host paths to known cloud credential folders or files.", - "rulesNames": [ - "alert-mount-potential-credentials-paths" - ], - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials.
For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "rulesNames": [ - "rule-access-dashboard-subject-v1", - "rule-access-dashboard-wl-v1" - ], - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" - ], - "attributes": {}, - "rulesNames": [ - "rule-can-list-get-secrets-v1" - ], - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets from system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "PersistentVolume without encryption", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control detects PersistentVolumes without encryption", - "remediation": "Enable encryption on the PersistentVolume using the configuration in StorageClass", - "rulesNames": [ - "pv-without-encryption" - ], - "test": "Checking all PersistentVolumes via their StorageClass for encryption", - "controlID": "C-0264", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more.
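Illustrative sketch only (not part of the controls JSON): a least-privilege Role of the kind C-0186 and C-0187 point toward, with explicit resources and verbs instead of wildcards and no read access to secrets; the role name and namespace are hypothetical placeholders.

```
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: app-reader          # placeholder
  namespace: default
rules:
- apiGroups: [""]
  resources: ["configmaps", "pods"]   # explicit resources, no "*" wildcard (C-0187)
  verbs: ["get", "list"]              # no get/list/watch on secrets (C-0186)
```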
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" - ], - "attributes": {}, - "rulesNames": [ - "rule-list-all-cluster-admins-v1" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "rulesNames": [ - "non-root-containers" - ], - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0202", - "name": "Minimize the admission of Windows HostProcess Containers", - "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", - "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. 
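Illustrative sketch only (not part of the controls JSON): the kind of securityContext that satisfies C-0013 (non-root containers), with runAsNonRoot, a non-zero runAsUser and runAsGroup; the pod name and image are hypothetical placeholders.

```
apiVersion: v1
kind: Pod
metadata:
  name: nonroot-demo        # placeholder
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
  containers:
  - name: app
    image: myapp:latest     # placeholder image
    securityContext:
      allowPrivilegeEscalation: false
```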
As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", - "manual_test": "List the policies in use for each namespace in the cluster and ensure that each policy disallows the admission of `hostProcess` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" - ], - "attributes": {}, - "rulesNames": [ - "pod-security-admission-baseline-applied-1", - "pod-security-admission-baseline-applied-2" - ], - "baseScore": 7, - "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using the subPath or subPathExpr feature.", - "rulesNames": [ - "Symlink-Exchange-Can-Allow-Host-Filesystem-Access" - ], - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", - "rulesNames": [ - "label-usage-for-resources" - ], - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment.
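Illustrative sketch only (not part of the controls JSON): a Deployment carrying the kind of semantic labels that C-0076 expects; the names, label values and image are hypothetical placeholders.

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp               # placeholder
  labels:
    app: myapp
    tier: frontend
    phase: test
    deployment: v3
spec:
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: myapp:v3     # placeholder image
```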
For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "test": "The test checks whether a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Resources CPU limit and request", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use the exception mechanism to avoid unnecessary notifications.", - "rulesNames": [ - "resources-cpu-limit-and-request" - ], - "controlID": "C-0050", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0142", - "name": "Ensure that encryption providers are appropriately configured", - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", - "remediation": "Follow the Kubernetes documentation and configure an `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument.
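Illustrative sketch only (not part of the controls JSON): a minimal encryption configuration of the kind C-0142 asks for, using the `aescbc` provider for Secret resources; the key name and key material are hypothetical placeholders.

```
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
  - secrets
  providers:
  - aescbc:
      keys:
      - name: key1
        secret: <BASE64-ENCODED-32-BYTE-KEY>   # placeholder
  - identity: {}   # fallback for reading data written before encryption was enabled
```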
Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-encryption-providers-are-appropriately-configured" - ], - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no encryption provider is set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0237", - "name": "Check if signature exists", - "description": "Ensures that all images contain some signature", - "long_description": "Verifies that each image is signed", - "remediation": "Replace the image with a signed image", - "manual_test": "", - "references": [], - "attributes": {}, - "rulesNames": [ - "has-image-signature" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0169", - "name": "Ensure that the client certificate authorities file ownership is set to root:root", - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" - ], - "rulesNames": [ - "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0156", - "name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" - ], - "attributes": {}, - "rulesNames": [ - "etcd-peer-tls-enabled" - ], - "baseScore": 7, - "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0223", - "name": "Minimize cluster access to read-only for Amazon ECR", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", - "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS.
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", - "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" - ], - "attributes": {}, - "rulesNames": [ - "ensure_nodeinstancerole_has_right_permissions_for_ecr" - ], - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0231", - "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" - ], - "attributes": {}, - "rulesNames": [ - "ensure-https-loadbalancers-encrypted-with-tls-aws" - ], - "baseScore": 5.0, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0132", - "name": "Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", - "description": "Retain 10 or an appropriate number of old log files.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. 
For example, if you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0158", - "name": "Ensure that the --peer-auto-tls argument is not set to true", - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" - ], - "attributes": {}, - "rulesNames": [ - "etcd-peer-auto-tls-disabled" - ], - "baseScore": 6, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Deprecated Kubernetes image registry", - "attributes": {}, - "description": "The Kubernetes team has deprecated the GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io).
This is mandatory from version 1.27.", - "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", - "rulesNames": [ - "rule-identify-old-k8s-registry" - ], - "long_description": "The Kubernetes team has deprecated the GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from version 1.27.", - "test": "Checking images in the kube-system namespace; if the image is pulled from the old registry, we raise an alert.", - "controlID": "C-0253", - "baseScore": 5.0, - "example": "@controls/examples/c239.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Access container service account", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", - "rulesNames": [ - "access-container-service-account-v1" - ], - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Configured liveness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "rulesNames": [ - "configured-liveness-probe" - ], - "long_description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container.
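Illustrative sketch only (not part of the controls JSON): the image change that C-0253 asks for, moving a container image reference from the deprecated registry to registry.k8s.io; the component and tag are hypothetical placeholders.

```
# before (deprecated registry)
image: k8s.gcr.io/kube-proxy:v1.27.0
# after (current registry)
image: registry.k8s.io/kube-proxy:v1.27.0
```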
This control finds all the pods where the Liveness probe is not configured.", - "controlID": "C-0056", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0097", - "name": "Ensure that the scheduler pod specification file ownership is set to root:root", - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" - ], - "rulesNames": [ - "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0230", - "name": "Ensure Network Policy is Enabled and set as appropriate", - "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", - "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", - "remediation": "", - "manual_test": "", - "references": [], - "attributes": {}, - "rulesNames": [ - "ensure-network-policy-is-enabled-eks" - ], - "baseScore": 6.0, - "impact_statement": "Network Policy requires the Network Policy add-on. 
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0092", - "name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" - ], - "rulesNames": [ - "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Network mapping", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "rulesNames": [ - "internal-networking" - ], - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. 
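Illustrative sketch only (not part of the controls JSON): a default-deny ingress NetworkPolicy of the kind C-0049 points at, which makes every pod in the namespace reject inbound traffic that is not explicitly allowed by another policy; the policy name and namespace are hypothetical placeholders.

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress   # placeholder
  namespace: default
spec:
  podSelector: {}              # selects every pod in the namespace
  policyTypes:
  - Ingress                    # no ingress rules listed, so all inbound traffic is denied
```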
Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "No impersonation", - "attributes": { - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "rulesNames": [ - "rule-can-impersonate-users-groups-v1" - ], - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0126", - "name": "Ensure that the admission control plugin NamespaceLifecycle is set", - "description": "Reject creating objects in a namespace that is undergoing termination.", - "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set" - ], - "baseScore": 3, - "impact_statement": "None", - "default_value": "By default, `NamespaceLifecycle` is set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "rulesNames": [ - "rule-can-delete-k8s-events-v1" - ], - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events --all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "rulesNames": [ - "resources-memory-limits" - ], - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0139", - "name": "Ensure that the API Server --client-ca-file argument is set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" - ], - "attributes": {}, - "rulesNames": [ - "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - } -] \ No newline at end of file diff --git a/releaseDev/default_config_inputs.json b/releaseDev/default_config_inputs.json deleted file mode 100644 index 6d17fc2da..000000000 --- a/releaseDev/default_config_inputs.json +++ /dev/null @@ -1,145 +0,0 @@ -{ - "name": "default", - "attributes": {}, - "scope": { - "designatorType": "attributes", - "attributes": {} - }, - "settings": { - "postureControlInputs": { - "imageRepositoryAllowList": [], - "trustedCosignPublicKeys": [], - "insecureCapabilities": [ - "SETPCAP", - "NET_ADMIN", - "NET_RAW", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PTRACE", - "SYS_ADMIN", - "SYS_BOOT", - "MAC_OVERRIDE", - "MAC_ADMIN", - "PERFMON", - "ALL", - "BPF" - ], - "listOfDangerousArtifacts": [ - "bin/bash", - "sbin/sh", - "bin/ksh", - "bin/tcsh", - "bin/zsh", - "usr/bin/scsh", - "bin/csh", - "bin/busybox", - "usr/bin/busybox" - ], - "publicRegistries": [], - "sensitiveInterfaces": [ - "nifi", - "argo-server", - "weave-scope-app", - "kubeflow", - "kubernetes-dashboard", - "jenkins", - "prometheus-deployment" - ], - "max_critical_vulnerabilities": [ - "5" - ], - "max_high_vulnerabilities": [ - "10" - ], - "sensitiveKeyNames": [ - "aws_access_key_id", - "aws_secret_access_key", - "azure_batchai_storage_account", - "azure_batchai_storage_key", - "azure_batch_account", - "azure_batch_key", - "secret", - "key", - "password", - "pwd", - "token", - "jwt", - "bearer", - "credential" - ], - "sensitiveValues": [ - "BEGIN \\w+ PRIVATE KEY", - "PRIVATE KEY", - "eyJhbGciO", - "JWT", - "Bearer", - "_key_", - "_secret_" - ], - "sensitiveKeyNamesAllowed": [], - "sensitiveValuesAllowed": [], - "servicesNames": [ - "nifi-service", - "argo-server", - "minio", - "postgres", - "workflow-controller-metrics", - "weave-scope-app", - "kubernetes-dashboard" - ], - "untrustedRegistries": [], - "memory_request_max": [], - "memory_request_min": [ - "0" - ], - "memory_limit_max": [], - "memory_limit_min": [ - "0" - ], - "cpu_request_max": [], - "cpu_request_min": [ - "0" - ], - "cpu_limit_max": [], - "cpu_limit_min": [ - "0" - ], - "wlKnownNames": [ - "coredns", - "kube-proxy", - "event-exporter-gke", - "kube-dns", - "17-default-backend", - "metrics-server", - "ca-audit", - "ca-dashboard-aggregator", - "ca-notification-server", - "ca-ocimage", - "ca-oracle", - "ca-posture", - "ca-rbac", - "ca-vuln-scan", - "ca-webhook", - "ca-websocket", - "clair-clair" - ], - "recommendedLabels": [ - "app", - "tier", - "phase", - "version", - "owner", - "env" - ], - "k8sRecommendedLabels": [ - "app.kubernetes.io/name", - "app.kubernetes.io/instance", - 
"app.kubernetes.io/version", - "app.kubernetes.io/component", - "app.kubernetes.io/part-of", - "app.kubernetes.io/managed-by", - "app.kubernetes.io/created-by" - ] - } - } -} \ No newline at end of file diff --git a/releaseDev/devopsbest.json b/releaseDev/devopsbest.json deleted file mode 100644 index e0ce34597..000000000 --- a/releaseDev/devopsbest.json +++ /dev/null @@ -1,1107 +0,0 @@ -{ - "name": "DevOpsBest", - "description": "", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Configured readiness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "configured-readiness-probe", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Readiness probe is not configured", - "remediation": "Ensure Readiness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": 
[fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Configured liveness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution 
lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", - "controlID": "C-0056", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "configured-liveness-probe", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Liveness probe is not configured", - "remediation": "Ensure Liveness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Pods in default namespace", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", - "remediation": "Create necessary namespaces and move all the pods from default namespace there.", - "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the pods running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "name": "Naked pods", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. 
This control identifies every pod that does not have a corresponding parental object.", - "remediation": "Create necessary Deployment object for every pod making any pod a first class citizen in your IaC architecture.", - "long_description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of pods may lead to configuration drift and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", - "test": "Test if pods are not associated with Deployment, ReplicaSet etc. If not, fail.", - "controlID": "C-0073", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "naked-pods", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", - "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. Example command: kubectl create deployment nginx-depl --image=nginx:1.19", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Container runtime socket mounted", - "attributes": { - "controlTypeTags": [ - "devops", - "smartRemediation" - ] - }, - "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", - "remediation": "Remove container runtime socket mount request or define an exception.", - "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", - "test": "Check hostpath. 
If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "containers-mounting-docker-socket", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n" - } - ] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is 
not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", - "test": "If imagePullPolicy = always pass, else fail.", - "controlID": "C-0075", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "image-pull-policy-is-not-set-to-always", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "check imagePullPolicy field, if imagePullPolicy = always pass, else fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), 
sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" - } - ] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. 
This control helps you find deployments without any of the expected labels.", - "test": "Test will check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "label-usage-for-resources", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.recommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.recommendedLabels", - "name": "Recommended Labels", - "description": "Kubescape checks that workloads have at least one label that identifies semantic attributes." - } - ], - "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, 
beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" - } - ] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "k8s-common-labels-usage", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.k8sRecommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.k8sRecommendedLabels", - "name": "Kubernetes Recommended Labels", - "description": "Kubescape checks that workloads have at least one of this list of configurable labels, as recommended in the Kubernetes documentation." - } - ], - "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his 
Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" - } - ] - }, - { - "name": "Deprecated Kubernetes image registry", - "attributes": {}, - "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", - "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). 
This is mandatory from 1.27", - "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", - "controlID": "C-0253", - "baseScore": 5.0, - "example": "@controls/examples/c239.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-identify-old-k8s-registry", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Identifying if pod container images are from deprecated K8s registry", - "remediation": "Use images new registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": 
[obj]}}\n}\n\n" - } - ] - }, - { - "name": "Ensure CPU requests are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the CPU requests are not set.", - "remediation": "Set the CPU requests or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0268", - "baseScore": 3.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-cpu-requests", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU requests are not set.", - "remediation": "Ensure CPU requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ==================================== no CPU requests =============================================\n# Fails if pod does not have container with CPU request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU requests\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU requests\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Ensure memory requests are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the memory requests are not set.", - "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0269", - "baseScore": 3.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-requests", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory requests are not set.", - "remediation": "Ensure memory requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory requests ==================================\n# Fails if pod does not have container with memory requests\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n" - } - ] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-cpu-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU limits are not set.", - "remediation": "Ensure CPU limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does 
not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory limits are not set.", - "remediation": "Ensure memory limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0018", - "C-0044", - "C-0056", - "C-0061", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0253", - "C-0268", - "C-0269", - "C-0270", - "C-0271" - ] -} \ No newline at end of file diff --git a/releaseDev/exceptions.json b/releaseDev/exceptions.json deleted file mode 100644 index 8f9b40652..000000000 --- a/releaseDev/exceptions.json +++ /dev/null @@ -1,7820 +0,0 @@ -[ - { - "name": "exclude-default-namespace-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "name": "kubescape", - "namespace": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-default-namespace-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-default-namespace-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "default", - "namespace": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-pod-kube-apiserver", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-apiserver-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0013" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0020" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0034" - }, - { - "controlID": "c-0016" - }, - { - "controlID": "C-0270" - }, - { - "controlID": "C-0271" - }, - { - "controlID": "c-0048" - }, - { - "controlID": "c-0041" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": 
"c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "synchronizer", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-6", - "policyType": "postureExceptionPolicy", - "actions": [ - 
"alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "storage", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "otel-collector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "node-agent", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0076" - }, - { - "controlID": "c-0237" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - }, - { - "controlID": "c-0038" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": 
"kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0013" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0013" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0013" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0013" - } - 
] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0013" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "node-agent", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0045" - }, - { - "controlID": "c-0046" - }, - { - "controlID": "c-0048" - }, - { - "controlID": "c-0057" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0016" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0034" - }, - { - "controlID": "c-0074" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - } - ] - }, - { - "name": "exclude-ks-service-account", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "ks-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-kubescape-service-account", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kubescape-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-kubescape-default-service-account", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "default", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0189" - }, - { - "controlID": "c-0190" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "ks-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0015" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0186" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "storage", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - 
"controlID": "c-0034" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0015" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0186" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kubescape-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0015" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0186" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "node-agent", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0015" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0186" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0015" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0186" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "storage-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0015" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0186" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "synchronizer", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0015" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0186" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "node-agent-service-account", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0207" - }, - { - "controlID": "c-0013" - }, - { - 
"controlID": "c-0015" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0186" - } - ] - }, - { - "name": "exclude-kubescape-otel", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "otel-collector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - } - ] - }, - { - "name": "exclude-kubescape-host-scanner-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "host-scanner", - "namespace": "kubescape-host-scanner" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kubescape-host-scanner-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "host-scanner", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-schedulers-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "name": "kubevuln-schedule-.*", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0026" - }, - { - "controlID": "c-0076" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-schedulers-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "name": "kubescape-registry-scan-.*", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0026" - }, - { - "controlID": "c-0076" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-schedulers-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "name": "kubevuln-scheduler", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0026" - }, - { - "controlID": "c-0076" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0210" - }, - { - 
"controlID": "c-0211" - } - ] - }, - { - "name": "exclude-schedulers-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "name": "kubescape-scheduler", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0026" - }, - { - "controlID": "c-0076" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-storage-apiserver", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "storage-apiserver", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0034" - }, - { - "controlID": "c-0260" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0076" - } - ] - }, - { - "name": "exclude-ns", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kubescape" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kubescape-prometheus-security-context", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-kubescape-prometheus-deployment-allowed-registry", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-prometheus-deployment-ingress-and-egress", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "coredns-[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": 
"exclude-gke-kube-system-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-proxy-[A-Za-z0-9-]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "etcd-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "metadata-proxy-v[0-9.]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "node-local-dns" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metrics-agent.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "pdcsi-node-windows" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "anetd" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "netd" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": 
"fluentbit-gke-big" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke-small" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke-max" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nccl-fastsocket-installer" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-15", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "filestore-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "pdcsi-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-17", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "ip-masq-agent" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-18", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "anetd-win" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-19", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": 
[ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metadata-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-20", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metrics-agent-windows" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-21", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "kube-proxy" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-22", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-23", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin-large" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-24", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin-medium" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-25", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "image-package-extractor" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-26", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "namespace": "kube-system", - "name": "image-package-extractor-cleanup" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-27", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin-small" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } 
- ] - }, - { - "name": "exclude-gke-kube-system-resources-28", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-29", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-30", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "egress-nat-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-31", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "event-exporter-gke" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-32", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "antrea-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-33", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "antrea-controller-horizontal-autoscaler" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-34", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "kube-dns-autoscaler" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-35", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "metrics-server-v[0-9.]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-36", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - 
"attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "konnectivity-agent-autoscaler" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-37", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentd-elasticsearch" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-38", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "konnectivity-agent" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-39", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "l7-default-backend" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-public-resources", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-public", - "name": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-gke-kube-node-lease-resources", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-node-lease", - "name": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "konnectivity-agent-cpha" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "endpointslicemirroring-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": 
true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "replicaset-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "endpointslice-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "service-account-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "namespace-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "clusterrole-aggregation-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "generic-garbage-collector" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "certificate-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "daemon-set-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cloud-provider" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - 
"attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ephemeral-volume-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "root-ca-cert-publisher" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "bootstrap-signer" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-18", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "expand-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-19", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "disruption-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-20", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ttl-after-finished-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-21", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "job-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-22", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pv-protection-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-23", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "persistent-volume-binder" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-24", - "policyType": "postureExceptionPolicy", - "actions": 
[ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pvc-protection-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-25", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "statefulset-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-26", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "deployment-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-27", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "node-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-28", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cronjob-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-29", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "resourcequota-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-30", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "endpoint-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-31", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pod-garbage-collector" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-32", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ttl-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-33", - "policyType": "postureExceptionPolicy", - 
"actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "token-cleaner" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-34", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-35", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "attachdetach-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-36", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "kube-proxy" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-37", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "konnectivity-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-38", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "replication-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-39", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-40", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "service-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-41", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "kube-dns-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-42", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], 
- "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "netd" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-43", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "metadata-proxy" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-44", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "antrea-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-45", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cilium" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-46", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "node-local-dns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-47", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "gke-metrics-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-48", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "egress-nat-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-49", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "antrea-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-50", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "event-exporter-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-51", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, 
- "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "antrea-cpha" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-52", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "fluentbit-gke" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-53", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pdcsi-node-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-54", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ip-masq-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-55", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "filestorecsi-node-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-56", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "gke-metadata-server" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-users-and-groups-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "namespace": "kube-system", - "name": "system:vpa-recommender" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-users-and-groups-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "namespace": "kube-system", - "name": "system:anet-operator" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:clustermetrics" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - 
"name": "system:controller:glbc" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:l7-lb-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:managed-certificate-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:gke-common-webhooks" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:kube-scheduler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:gcp-controller-manager" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:resource-tracker" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:storageversionmigrator" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:kube-controller-manager" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:kubestore-collector" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - 
"kind": "Group", - "name": "system:masters" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "ca-validate-cfg" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "flowcontrol-guardrails.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "validation-webhook.snapshot.storage.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "gmp-operator.gmp-system.monitoring.googleapis.com" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "warden-validating.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "nodelimit.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "gkepolicy.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "validation-webhook.snapshot.storage.k8s.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "APIService", - "name": 
"v1beta1.metrics.k8s.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "pod-ready.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "ca-mutate-cfg" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "neg-annotation.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "mutate-scheduler-profile.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "sasecret-redacter.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-15", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "workload-defaulter.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "admissionwebhookcontroller.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-17", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "gke-vpa-webhook-config" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-18", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": 
"filestorecsi-mutation-webhook.storage.k8s.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-19", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-20", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "gmp-public" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-21", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "gmp-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kube-controller-manager", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kube-scheduler", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "route-controller", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "superadmin", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "pkgextract-service", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "default", - "namespace": "gmp-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - 
"resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "collector", - "namespace": "gmp-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "operator", - "namespace": "gmp-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "collector", - "namespace": "gmp-public" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "alertmanager", - "namespace": "gmp-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "collector", - "namespace": "gmp-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "rule-evaluator", - "namespace": "gmp-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gmp-operator", - "namespace": "gmp-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-15", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "name": "gke-metrics-agent-conf", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-eks-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "aws-node-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-proxy-[A-Za-z0-9]+" - } - } - ], - 
"posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "aws-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "eventrouter" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "ebs-csi-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "ebs-csi-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "ebs-csi-node-windows" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": 
"coredns-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-17", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "aws-cloud-provider" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-18", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "aws-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-19", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "eks-admin" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-20", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "eks-vpc-resource-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-21", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-22", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "tagging-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-23", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": 
"ServiceAccount", - "namespace": "kube-system", - "name": "vpc-resource-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-24", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "eventrouter" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-25", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ebs-csi-controller-sa" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-26", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ebs-csi-node-sa" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-27", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "eks:fargate-manager" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-28", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "eks:addon-manager" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-29", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "eks:certificate-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-30", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "eks:node-manager" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-31", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Group", - "name": "system:masters" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-otel", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "otel-collector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": 
"c-0034" - } - ] - }, - { - "name": "exclude-service-accounts-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0186" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-service-accounts-17", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0186" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-service-accounts-18", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0186" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-service-accounts-19", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "storage-aggregated-apiserver-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0186" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-service-accounts-20", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "storage", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0190" - } - ] - }, - { - "name": "exclude-service-accounts-21", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "node-agent", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0190" - } - ] - }, - { - "name": "exclude-aks-kube-system-deployments-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - 
"namespace": "kube-system", - "name": "coredns-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "konnectivity-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azuredisk-node-win" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "azure-ip-masq-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "cloud-node-manager" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "cloud-node-manager-windows" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "omsagent-rs" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-pods-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "azure-ip-masq-agent-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": 
"cloud-node-manager-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "coredns-autoscaler--[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "csi-azuredisk-node-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "csi-azurefile-node-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "konnectivity-agent-[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "omsagent-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "omsagent-rs-[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-services-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-services-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - 
{ - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azuredisk-node" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azurefile-node" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azurefile-node-win" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "kube-proxy" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "omsagent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "omsagent-win" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "coredns-autoscaler-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "coredns-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "konnectivity-agent-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": 
"ReplicaSet", - "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "omsagent-rs-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-namespaces-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-public" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-namespaces-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-node-lease" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "azure-cloud-provider" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cloud-node-manager" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "coredns-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "csi-azuredisk-node-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "csi-azurefile-node-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": 
"exclude-aks-kube-system-sa-23", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "horizontal-pod-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-30", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "omsagent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-46", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "default", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-47", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-node-lease", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-48", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-public", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-49", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "azure-ip-masq-agent-config-reconciled" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-50", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "cluster-autoscaler-status" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-51", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "container-azm-ms-aks-k8scluster" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-52", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-53", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - 
"attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "coredns-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-54", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "coredns-custom" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-55", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "extension-apiserver-authentication" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-56", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-57", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "omsagent-rs-config" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-58", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "overlay-upgrade-data" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-59", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "aks-webhook-admission-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-60", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "aks-node-mutating-webhook" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-61", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "aks-node-validating-webhook" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-63", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": 
"Group", - "name": "system:nodes" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-64", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "clusterAdmin" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-minikube-kube-system-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-proxy-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "sealed-secrets-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "tpu-device-plugin" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "runsc-metric-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-system" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "storage-provisioner" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - 
"namespace": "kube-system", - "name": "kube-scheduler-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-controller-manager-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-kube-system-service-accounts-84", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "storage-provisioner" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "apiVersion": "rbac.authorization.k8s.io", - "name": "system:kube-scheduler", - "kind": "User" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-15", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "apiVersion": "rbac.authorization.k8s.io", - "name": "system:kube-controller-manager", - "kind": "User" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "apiVersion": "rbac.authorization.k8s.io", - "name": "system:masters", - "kind": "Group" - } - } - ], - "posturePolicies": [ - {} - ] - } -] \ No newline at end of file diff --git a/releaseDev/frameworks.json b/releaseDev/frameworks.json deleted file mode 100644 index 1a5b22d6f..000000000 --- a/releaseDev/frameworks.json +++ /dev/null @@ -1,11764 +0,0 @@ -[ - { - "name": "DevOpsBest", - "description": "", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Configured readiness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
This control finds all the pods where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Configured liveness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", - "controlID": "C-0056", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Pods in default namespace", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. 
This control identifies all the pods running in the default namespace.", - "remediation": "Create necessary namespaces and move all the pods from default namespace there.", - "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the pods running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Naked pods", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have corresponding parental object.", - "remediation": "Create necessary Deployment object for every pod making any pod a first class citizen in your IaC architecture.", - "long_description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have corresponding parental object.", - "test": "Test if pods are not associated with Deployment, ReplicaSet etc. If not, fail.", - "controlID": "C-0073", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container runtime socket mounted", - "attributes": { - "controlTypeTags": [ - "devops", - "smartRemediation" - ] - }, - "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", - "remediation": "Remove container runtime socket mount request or define an exception.", - "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", - "test": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. 
Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all pods with latest tag that have ImagePullSecret not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all pods with latest tag that have ImagePullSecret not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latests too.", - "test": "If imagePullPolicy = always pass, else fail.", - "controlID": "C-0075", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "test": "Test will check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Deprecated Kubernetes image registry", - "attributes": {}, - "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", - "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", - "controlID": "C-0253", - "baseScore": 5.0, - "example": "@controls/examples/c239.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure CPU requests are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the CPU requests are not set.", - "remediation": "Set the CPU requests or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0268", - "baseScore": 3.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure memory requests are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the memory requests are not set.", - "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0269", - "baseScore": 3.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - 
"file" - ] - }, - "rules": [] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0018", - "C-0044", - "C-0056", - "C-0061", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0253", - "C-0268", - "C-0269", - "C-0270", - "C-0271" - ] - }, - { - "name": "AllControls", - "description": "Contains all the controls from all the frameworks", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Roles with delete capabilities", - "attributes": { - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). 
If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Configured readiness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Mount service principal", - "attributes": { - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mount to known cloud credentials folders or files .", - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", - "controlID": "C-0021", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. 
It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. 
Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Validate admission controller (validating)", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Validate admission controller (mutating)", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "SSH server running inside container", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", - "controlID": "C-0042", - "baseScore": 3.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. 
It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. 
This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Network mapping", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Instance Metadata API", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Access container service account", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", -                "test": "Check for each namespace if there is a network policy defined.", -                "controlID": "C-0054", -                "baseScore": 4.0, -                "category": { -                    "name": "Network", -                    "id": "Cat-4" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Linux hardening", -                "attributes": { -                    "controlTypeTags": [ -                        "security", -                        "compliance" -                    ] -                }, -                "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", -                "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", -                "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", -                "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. If none of these fields are defined for both the container and the pod, alert.", -                "controlID": "C-0055", -                "baseScore": 4.0, -                "category": { -                    "name": "Workload", -                    "subCategory": { -                        "name": "Node escape", -                        "id": "Cat-9" -                    }, -                    "id": "Cat-5" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Configured liveness probe", -                "attributes": { -                    "controlTypeTags": [ -                        "devops" -                    ] -                }, -                "description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", -                "remediation": "Ensure Liveness probes are configured wherever possible.", -                "long_description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", -                "controlID": "C-0056", -                "category": { -                    "name": "Workload", -                    "id": "Cat-5" -                }, -                "baseScore": 4.0, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Privileged container", -                "attributes": { -                    "microsoftMitreColumns": [ -                        "Privilege escalation" -                    ], -                    "controlTypeTags": [ -                        "security", -                        "smartRemediation" -                    ] -                }, -                "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", -                "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", -                "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as a network policy, Seccomp, etc., and still remove all unnecessary capabilities. 
Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Pods in default namespace", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", - "remediation": "Create necessary namespaces and move all the pods from default namespace there.", - "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. 
This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the pods running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "No impersonation", - "attributes": { - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. 
This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. 
This is done through client certificate verification; the operator must configure the Kubelet with a client CA file to use for this purpose.", -                "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", -                "controlID": "C-0070", -                "baseScore": 9.0, -                "category": { -                    "name": "Control plane", -                    "id": "Cat-1" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Naked pods", -                "attributes": { -                    "controlTypeTags": [ -                        "devops" -                    ] -                }, -                "description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drift and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", -                "remediation": "Create the necessary Deployment object for every pod, making any pod a first-class citizen in your IaC architecture.", -                "long_description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drift and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", -                "test": "Test if pods are not associated with a Deployment, ReplicaSet etc. If not, fail.", -                "controlID": "C-0073", -                "category": { -                    "name": "Workload", -                    "id": "Cat-5" -                }, -                "baseScore": 3.0, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Container runtime socket mounted", -                "attributes": { -                    "controlTypeTags": [ -                        "devops", -                        "smartRemediation" -                    ] -                }, -                "description": "Mounting the container runtime socket (Unix socket) enables a container to access the container runtime, retrieve sensitive information and execute commands, if the container runtime is available. This control identifies pods that attempt to mount the container runtime socket for accessing the container runtime.", -                "remediation": "Remove the container runtime socket mount request or define an exception.", -                "long_description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies pods that attempt to mount the Docker socket for accessing the Docker runtime.", -                "test": "Check hostPath. If the path is set to one of the container runtime sockets, the container has access to the container runtime - fail.", -                "controlID": "C-0074", -                "baseScore": 5.0, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Image pull policy on latest tag", -                "attributes": { -                    "controlTypeTags": [ -                        "devops" -                    ] -                }, -                "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. 
This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always.", -                "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", -                "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", -                "test": "If imagePullPolicy = always pass, else fail.", -                "controlID": "C-0075", -                "category": { -                    "name": "Workload", -                    "id": "Cat-5" -                }, -                "baseScore": 2.0, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Label usage for resources", -                "attributes": { -                    "actionRequired": "configuration", -                    "controlTypeTags": [ -                        "devops" -                    ] -                }, -                "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", -                "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", -                "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", -                "test": "Test will check if a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", -                "controlID": "C-0076", -                "category": { -                    "name": "Workload", -                    "id": "Cat-5" -                }, -                "baseScore": 2.0, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "K8s common labels usage", -                "attributes": { -                    "actionRequired": "configuration", -                    "controlTypeTags": [ -                        "devops" -                    ] -                }, -                "description": "Kubernetes common labels help manage and monitor a Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. 
Users should update to these versions to resolve the issue.", -                "test": "Checking the containerd version to see if it is a vulnerable version (where the container runtime is containerd)", -                "controlID": "C-0087", -                "baseScore": 7.0, -                "example": "", -                "category": { -                    "name": "Workload", -                    "id": "Cat-5" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "RBAC enabled", -                "attributes": { -                    "controlTypeTags": [ -                        "security", -                        "compliance" -                    ] -                }, -                "description": "RBAC is the most advanced and well-accepted mode of authorizing users of the Kubernetes API", -                "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", -                "long_description": "RBAC is the most advanced and well-accepted mode of authorizing users of the Kubernetes API", -                "test": "Testing the API server or managed Kubernetes vendor API to determine if RBAC is enabled", -                "controlID": "C-0088", -                "baseScore": 7.0, -                "category": { -                    "name": "Control plane", -                    "id": "Cat-1" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "CVE-2022-39328-grafana-auth-bypass", -                "attributes": { -                    "controlTypeTags": [ -                        "security" -                    ] -                }, -                "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable an attacker to access unauthorized endpoints under heavy load.", -                "remediation": "Update your Grafana to 9.2.4 or above", -                "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. The CVSS score for this vulnerability is 9.8 Critical.", -                "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", -                "controlID": "C-0090", -                "baseScore": 9.0, -                "example": "", -                "category": { -                    "name": "Workload", -                    "id": "Cat-5" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "CVE-2022-47633-kyverno-signature-bypass", -                "attributes": { -                    "controlTypeTags": [ -                        "security" -                    ] -                }, -                "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", -                "remediation": "Update your Kyverno to 1.8.5 or above", -                "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process would pull the image manifest twice, once for verification and once for the actual execution. The verification process could be bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. 
See C-0001 and C-0078 for limiting the use of trusted repositories.", -                "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", -                "controlID": "C-0091", -                "baseScore": 8.0, -                "example": "", -                "category": { -                    "name": "Workload", -                    "id": "Cat-5" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "controlID": "C-0262", -                "name": "Anonymous access enabled", -                "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", -                "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", -                "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to the anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", -                "attributes": {}, -                "baseScore": 7, -                "category": { -                    "name": "Control plane", -                    "subCategory": { -                        "name": "Supply chain", -                        "id": "Cat-6" -                    }, -                    "id": "Cat-1" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "controlID": "C-0265", -                "name": "Authenticated user has sensitive permissions", -                "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have cluster-risking permissions.", -                "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated has minimal permissions.", -                "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to the system:authenticated group.", -                "attributes": {}, -                "baseScore": 7, -                "category": { -                    "name": "Control plane", -                    "subCategory": { -                        "name": "Supply chain", -                        "id": "Cat-6" -                    }, -                    "id": "Cat-1" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Ensure CPU limits are set", -                "attributes": { -                    "controlTypeTags": [ -                        "compliance", -                        "devops", -                        "security" -                    ], -                    "attackTracks": [ -                        { -                            "attackTrack": "service-destruction", -                            "categories": [ -                                "Denial of service" -                            ] -                        } -                    ] -                }, -                "description": "This control identifies all Pods for which the CPU limits are not set.", -                "remediation": "Set the CPU limits or use the exception mechanism to avoid unnecessary notifications.", -                "controlID": "C-0270", -                "baseScore": 8.0, -                "category": { -                    "name": "Workload", -                    "subCategory": { -                        "name": "Resource management", -                        "id": "Cat-7" -                    }, -                    "id": "Cat-5" -                }, -                "scanningScope": { -                    "matches": [ -                        "cluster", -                        "file" -                    ] -                }, -                "rules": [] -            }, -            { -                "name": "Ensure memory limits are set", -                "attributes": { -                    "controlTypeTags": [ -                        "compliance", -                        "devops", -                        "security" -                    ], -                    "attackTracks": [ -                        { -                            "attackTrack": "service-destruction", -                            "categories": [ -                                "Denial of service" -                            ] -                        } -                    ] -                }, -                "description": "This control identifies all Pods for which the memory limits are not set.", -                "remediation": "Set the memory limits or use the exception mechanism to avoid unnecessary notifications.", -                "controlID": "C-0271", -                "baseScore": 8.0, -                "category": { -                    "name": "Workload", -                    "subCategory": { -                        "name": "Resource management", -                        "id": "Cat-7" -                    }, -                    "id": "Cat-5" -                }, - 
"scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0005", - "C-0007", - "C-0012", - "C-0013", - "C-0014", - "C-0015", - "C-0016", - "C-0017", - "C-0018", - "C-0020", - "C-0021", - "C-0026", - "C-0030", - "C-0031", - "C-0034", - "C-0035", - "C-0036", - "C-0038", - "C-0039", - "C-0041", - "C-0042", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0049", - "C-0052", - "C-0053", - "C-0054", - "C-0055", - "C-0056", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0078", - "C-0079", - "C-0081", - "C-0087", - "C-0088", - "C-0090", - "C-0091", - "C-0262", - "C-0265", - "C-0270", - "C-0271" - ] - }, - { - "name": "cis-v1.23-t1.0.1", - "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", - "attributes": { - "armoBuiltin": true, - "version": "v1.0.1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "1": { - "id": "1", - "name": "Control Plane Components", - "subSections": { - "1": { - "id": "1.1", - "name": "Control Plane Node Configuration Files", - "controlsIDs": [ - "C-0092", - "C-0093", - "C-0094", - "C-0095", - "C-0096", - "C-0097", - "C-0098", - "C-0099", - "C-0100", - "C-0101", - "C-0102", - "C-0103", - "C-0104", - "C-0105", - "C-0106", - "C-0107", - "C-0108", - "C-0109", - "C-0110", - "C-0111", - "C-0112" - ] - }, - "2": { - "id": "1.2", - "name": "API Server", - "controlsIDs": [ - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - "C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143" - ] - }, - "3": { - "id": "1.3", - "name": "Controller Manager", - "controlsIDs": [ - "C-0144", - "C-0145", - "C-0146", - "C-0147", - "C-0148", - "C-0149", - "C-0150" - ] - }, - "4": { - "id": "1.4", - "name": "Scheduler", - "controlsIDs": [ - "C-0151", - "C-0152" - ] - } - } - }, - "2": { - "name": "etcd", - "id": "2", - "controlsIDs": [ - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159" - ] - }, - "3": { - "name": "Control Plane Configuration", - "id": "3", - "subSections": { - "2": { - "name": "Logging", - "id": "3.2", - "controlsIDs": [ - "C-0160", - "C-0161" - ] - } - } - }, - "4": { - "name": "Worker Nodes", - "id": "4", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "4.1", - "controlsIDs": [ - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171" - ] - }, - "2": { - "name": "Kubelet", - "id": "4.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184" - ] - } - } - }, - "5": { - "name": "Policies", - "id": "5", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "5.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191" - ] - }, - "2": { - "name": "Pod Security Standards", - "id": "5.2", - "controlsIDs": [ - "C-0192", - "C-0193", - "C-0194", - "C-0195", - 
"C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204" - ] - }, - "3": { - "name": "Network Policies and CNI", - "id": "5.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "name": "Secrets Management", - "id": "5.4", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "7": { - "name": "General Policies", - "id": "5.7", - "controlsIDs": [ - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "controlID": "C-0092", - "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0093", - "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0094", - "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0095", - "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0096", - "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0097", - "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0098", - "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0099", - "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0100", - "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0101", - "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0102", - "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. 
It should not be readable or writable by any group members or the world.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory has permissions of `755`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0103", - "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0104", - "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, admin.conf has permissions of `600`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0105", - "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0106", - "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0107", - "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. 
You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0108", - "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0109", - "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0110", - "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0111", - "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0112", - "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0113", - "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the API server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. 
However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0114", - "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", - "description": "Do not use token based authentication.", - "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", - "default_value": "By default, `--token-auth-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0115", - "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", - "default_value": "By default, `DenyServiceExternalIPs` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0116", - "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", - "description": "Enable certificate based kubelet authentication.", - "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, certificate-based kubelet authentication is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0117", - "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", - "description": "Verify kubelet's certificate before establishing connection.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0118", - "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not always authorize all requests.", - "long_description": "The API Server can be configured to allow all requests. This mode should not be used on any production cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Only authorized requests will be served.", - "default_value": "By default, `AlwaysAllow` is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0119", - "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, `Node` authorization is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0120", - "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", - "description": "Turn on Role Based Access Control.", - "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. 
It is recommended to use the RBAC authorization mode.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", - "default_value": "By default, `RBAC` authorization is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0121", - "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", - "description": "Limit the rate at which the API server accepts requests.", - "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "You need to carefully tune in limits as per your environment.", - "default_value": "By default, `EventRateLimit` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0122", - "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", - "description": "Do not allow all requests.", - "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and do not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. 
Its behavior was equivalent to turning off all admission controllers.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Only requests explicitly allowed by the admission control plugins would be served.", - "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0123", - "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", - "description": "Always pull images.", - "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" - ], - "attributes": {}, - "baseScore": 4, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", - "default_value": "By default, `AlwaysPullImages` is not set.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0124", - "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", - "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies.", - "default_value": "By default, `SecurityContextDeny` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0125", - "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", - "description": "Automate service accounts management.", - "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "None.", - "default_value": "By default, `ServiceAccount` is set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0126", - "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", - "description": "Reject creating objects in a namespace that is undergoing termination.", - "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "None", - "default_value": "By default, `NamespaceLifecycle` is set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0127", - "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `NodeRestriction` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0128", - "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", - "description": "Do not disable the secure port.", - "long_description": "The secure port is used to serve https with authentication and authorization. 
If you disable it, no https traffic is served and all traffic is served unencrypted.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "You need to set the API Server up with the right TLS certificates.", - "default_value": "By default, port 6443 is used as the secure port.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0129", - "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0130", - "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0131", - "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", - "description": "Retain the logs for at least 30 days or as appropriate.", - "long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0132", - "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", - "description": "Retain 10 or an appropriate number of old log files.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. 
For example, if you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0133", - "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0134", - "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", - "description": "Set global request timeout for API server requests as appropriate.", - "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--request-timeout` is set to 60 seconds.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0135", - "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", - "description": "Validate service account before validating token.", - "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. This is an example of a time-of-check to time-of-use security issue.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `--service-account-lookup` argument is set to `true`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0136", - "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-key-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0137", - "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0138", - "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0139", - "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0140", - "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-cafile` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0141", - "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", - "description": "Encrypt etcd key-value store.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `--encryption-provider-config` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0142", - "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. 
Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no encryption provider is set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0143", - "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0144", - "name": "CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", - "description": "Activate garbage collector on pod termination, as appropriate.", - "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. 
Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0145", - "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0146", - "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", - "description": "Use individual service account credentials for each controller.", - "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. 
When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" - ], - "attributes": {}, - "baseScore": 4, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. If using other authorization methods (ABAC, Webhook, etc.), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", - "default_value": "By default, `--use-service-account-credentials` is set to false.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0147", - "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-private-key-file` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0148", - "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could leave them subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file.\n\n \n```\n--root-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "You need to set up and maintain a root certificate authority file.", - "default_value": "By default, `--root-ca-file` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0149", - "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation on controller-manager.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. 
This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0150", - "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0151", - "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0152", - "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0153", - "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", - "description": "Configure TLS encryption for the etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Client connections only over TLS would be served.", - "default_value": "By default, TLS encryption is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0154", - "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", - "description": "Enable client authentication on etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", - "default_value": "By default, the etcd service can be queried by unauthenticated clients.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0155", - "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", - "description": "Do not use self-signed certificates for TLS.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. 
You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", - "default_value": "By default, `--auto-tls` is set to `false`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0156", - "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0157", - "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", - "description": "etcd should be configured for peer authentication.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. 
```--peer-client-cert-auth=true```", - "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0158", - "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0159", - "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", - "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", - "default_value": "By default, no etcd certificate is created and used.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0160", - "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", - "remediation": "Create an audit policy file for your cluster.", - "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating overly large volumes of log information, as this could impact the availability of the cluster nodes.", - "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0161", - "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", - "description": "Ensure that the audit policy created for the cluster covers key security concerns.", - "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", - "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", - "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas:\n\n * Access to Secrets managed by the cluster. 
Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destinations.", - "default_value": "By default, Kubernetes clusters do not log audit information.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0162", - "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kubelet` service file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0163", - "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0164", - "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and that it exists with permissions of `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the proxy file has permissions of `640`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0165", - "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `proxy` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0166", - "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kubelet.conf` file has permissions of `600`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0167", - "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0168", - "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no `--client-ca-file` is specified.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0169", - "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no `--client-ca-file` is specified.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0170", - "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified, you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0171", - "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified, you should set its file ownership to maintain the integrity of the file. The file should be owned by root:root.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0172", - "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0173", - "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0174", - "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0175", - "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "By default, `--read-only-port` is set to `10255/TCP`. 
However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0176", - "name": "CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0177", - "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "By default, `--protect-kernel-defaults` is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0178", - "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0179", - "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. 
Hence, you should set up your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", - "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", - "default_value": "By default, `--hostname-override` argument is not set.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0180", - "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged; however, the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the `--event-qps` parameter in the `KUBELET_SYSTEM_PODS_ARGS` variable to an appropriate level.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. 
The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "By default, `--event-qps` argument is set to `5`.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0181", - "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Set up TLS connection on the Kubelets.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0182", - "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7).", - "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet client certificate rotation is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0183", - "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet server certificate rotation is enabled.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0184", - "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. 
Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets from system components which require this for their operation.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects:\n\n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group\nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespace in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access).\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods from system components which require this for their operation.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects:\n\n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group\nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where 
access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. 
Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0192", - "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", - "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", - "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", - "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. 
To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the [required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied.\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", - "test": "Checks that every namespace has pod security admission enabled, or that there are external policies applied for namespaced resources (validating/mutating webhooks).", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.", - "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0193", - "name": "CIS-5.2.2 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of privileged containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0194", - "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0195", - "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which 
require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0196", - "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0197", - "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running in a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on a contained process's ability to escalate privileges, within the context of the container.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0198", - "name": "CIS-5.2.7 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0199", - "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", - "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. 
With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0200", - "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, there are no restrictions on adding capabilities to containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0201", - "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so, from the perspective of the principle of least privilege, the use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0202", - "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", - "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", - "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0203", - "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", - "description": "Do not generally admit containers which make use of `hostPath` volumes.", - "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. 
The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Pods which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0204", - "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", - "description": "Do not generally permit containers which require the use of HostPorts.", - "long_description": "Host ports connect containers directly to the host's network. This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the use of HostPorts.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0205", - "name": "CIS-5.3.1 Ensure that the CNI in use supports Network Policies", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. 
As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "This will depend on the CNI plugin in use.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", - "default_value": "By default, network policies are not created.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.4.2 Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" - ], - "attributes": {}, - "baseScore": 5, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. 
You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "default_value": "By default, Kubernetes starts with four initial namespaces: 1. `default` - The default namespace for objects with no other namespace 2. `kube-system` - The namespace for objects created by the Kubernetes system 3. `kube-node-lease` - Namespace used for node heartbeats 4. `kube-public` - Namespace used for public information in a cluster", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", - "controlID": "C-0210", - "description": "Enable `docker/default` seccomp profile in your pod definitions.", - "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", - "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "manual_test": "Review the pod definitions in your cluster. It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", - "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. 
It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields are set according to the recommendations in the CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.7.4 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0092", - "C-0093", - "C-0094", - "C-0095", - "C-0096", - "C-0097", - "C-0098", - "C-0099", - "C-0100", - "C-0101", - "C-0102", - "C-0103", - "C-0104", - "C-0105", - "C-0106", - "C-0107", - "C-0108", - "C-0109", - "C-0110", - "C-0111", - "C-0112", - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - "C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143", - "C-0144", - "C-0145", - "C-0146", - "C-0147", - "C-0148", - 
"C-0149", - "C-0150", - "C-0151", - "C-0152", - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159", - "C-0160", - "C-0161", - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0192", - "C-0193", - "C-0194", - "C-0195", - "C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204", - "C-0205", - "C-0206", - "C-0207", - "C-0208", - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] - }, - { - "name": "SOC2", - "description": "SOC2 compliance related controls", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Firewall (CC6.1,CC6.6,CC7.2)", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management.", - "remediation": "Define network policies for all workloads to protect unwanted access", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [], - "long_description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management." - }, - { - "name": "Cryptographic key management - misplaced secrets (CC6.1,CC6.6,CC6.7)", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cryptographic key management - minimize access to secrets (CC6.1,CC6.6,CC6.7)", - "controlID": "C-0186", - "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", - "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Access restriction to infrastructure - admin access (CC6.1 ,CC6.2, CC6.7, CC6.8)", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Event logging (CC6.8,CC7.1,CC7.2)", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers.", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers: - Logon attempts - Data deletions - Application and system errors - Changes to software and configuration settings - Changes to system files, configuration files or content files The logs are monitored by IT Operations staff and significant issues are investigated and resolved within a timely manner.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Data in motion encryption - Ingress is TLS encrypted (CC6.1,CC6.6,CC6.7)", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", - "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", - "test": "Check if the Ingress resource has TLS configured", - "controlID": "C-0263", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [], - "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." - } - ], - "ControlsIDs": [ - "C-0260", - "C-0012", - "C-0186", - "C-0035", - "C-0067", - "C-0263" - ] - }, - { - "name": "MITRE", - "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Roles with delete capabilities", - "attributes": { - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Mount service principal", - "attributes": { - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
This control determines if any workload contains a volume with potential access to cloud credential.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mount to known cloud credentials folders or files .", - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", - "controlID": "C-0021", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events --all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Validate admission controller (validating)", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CoreDNS poisoning", - "attributes": { - "microsoftMitreColumns": [ - "Lateral Movement" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", - "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", - "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", - "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", - "controlID": "C-0037", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Validate admission controller (mutating)", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "SSH server running inside container", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. 
This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", - "controlID": "C-0042", - "baseScore": 3.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Instance Metadata API", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Access container service account", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. 
If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. 
The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using the subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events that happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSP enables fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and extend authorization beyond RBAC. It is important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. 
This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0007", - "C-0012", - "C-0014", - "C-0015", - "C-0020", - "C-0021", - "C-0026", - "C-0031", - "C-0035", - "C-0036", - "C-0037", - "C-0039", - "C-0042", - "C-0045", - "C-0048", - "C-0052", - "C-0053", - "C-0054", - "C-0057", - "C-0058", - "C-0059", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070" - ] - }, - { - "name": "NSA", - "description": "Implement NSA security advices for K8s ", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
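As an illustration of the remediation for C-0012 (credentials in configuration files), the sketch below takes a credential from a Kubernetes Secret instead of embedding it as a literal environment value; the pod, image, Secret and key names are placeholders, not taken from the framework definitions:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app-with-secret                    # placeholder name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0    # placeholder image
    env:
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:                      # credential comes from a Secret, not a literal value
          name: db-credentials             # placeholder Secret name
          key: password                    # placeholder key inside the Secret
```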
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. 
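For illustration, a pod hardened along the lines of the non-root (C-0013), privilege-escalation (C-0016) and immutable-filesystem (C-0017) controls above might set the relevant securityContext fields as in this sketch; names, image and UID/GID values are placeholders:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hardened-pod                       # placeholder name
spec:
  securityContext:
    runAsNonRoot: true                     # C-0013: refuse images that would run as UID 0
    runAsUser: 1000                        # C-0013: explicit non-root user ID
    runAsGroup: 1000                       # C-0013: explicit non-root group ID
  containers:
  - name: app
    image: registry.example.com/app:1.0    # placeholder image
    securityContext:
      allowPrivilegeEscalation: false      # C-0016
      readOnlyRootFilesystem: true         # C-0017
    volumeMounts:
    - name: tmp
      mountPath: /tmp                      # writable scratch space instead of a mutable root filesystem
  volumes:
  - name: tmp
    emptyDir: {}
```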
Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
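As an illustration of the restrictive policy recommended by the ingress/egress control (C-0030) above, a minimal default-deny NetworkPolicy might look like the following sketch; the policy and namespace names are placeholders:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all                   # placeholder name
  namespace: my-namespace                  # placeholder namespace
spec:
  podSelector: {}                          # selects every pod in the namespace
  policyTypes:
  - Ingress
  - Egress                                 # no ingress/egress rules are listed, so all traffic is denied
```

Additional, more permissive policies can then be layered on top for the connections each workload actually needs.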
", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. 
And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers may be given more privileges than they actually need. 
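As an illustration of the host-isolation controls above (C-0038 host PID/IPC, C-0041 hostNetwork, C-0044 hostPort), the following pod sketch keeps the host namespaces disabled and avoids hostPort; the pod name and image are placeholders:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: no-host-access                     # placeholder name
spec:
  hostNetwork: false                       # C-0041: do not share the node's network namespace
  hostPID: false                           # C-0038: keep the host PID namespace isolated
  hostIPC: false                           # C-0038: keep the host IPC namespace isolated
  containers:
  - name: app
    image: registry.example.com/app:1.0    # placeholder image
    ports:
    - containerPort: 8080                  # no hostPort (C-0044); expose via a Service instead
```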
This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommended, where possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. ", - "test": "Check whether AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp, etc., and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster.
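As an illustration of the Linux-hardening and privileged-container controls above (C-0046, C-0055, C-0057), the following container securityContext sketch drops all capabilities, enables the runtime's default seccomp profile and avoids privileged mode; the pod name, image and the single capability added back are placeholders chosen for the example:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: least-privilege                    # placeholder name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0    # placeholder image
    securityContext:
      privileged: false                    # C-0057: never run the container privileged
      capabilities:
        drop: ["ALL"]                      # C-0046: start from an empty capability set
        add: ["NET_BIND_SERVICE"]          # add back only what the workload genuinely needs
      seccompProfile:
        type: RuntimeDefault               # C-0055: apply the runtime's default seccomp filter
```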
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using the subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd; therefore, it is important to encrypt it.", - "remediation": "Turn on etcd encryption in your cluster; for more details, see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster.
It is important to use it so the operator has a record of events that happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSPs enable fine-grained authorization of pod creation, and it is important to enable them", - "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and extend authorization beyond RBAC. It is important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification; the Kubelet must be configured with a client CA file for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet.
This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0005", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0030", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0046", - "C-0054", - "C-0055", - "C-0057", - "C-0058", - "C-0059", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0270", - "C-0271" - ] - }, - { - "name": "cis-eks-t1.2.0", - "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", - "attributes": { - "armoBuiltin": true, - "version": "v1.2.0" - }, - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "2": { - "name": "Control Plane Configuration", - "id": "2", - "subSections": { - "1": { - "name": "Logging", - "id": "2.1", - "controlsIDs": [ - "C-0067" - ] - } - } - }, - "3": { - "name": "Worker Nodes", - "id": "3", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "3.1", - "controlsIDs": [ - "C-0167", - "C-0171", - "C-0235", - "C-0238" - ] - }, - "2": { - "name": "Kubelet", - "id": "3.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0183" - ] - }, - "3": { - "name": "Container Optimized OS", - "id": "3.3", - "controlsIDs": [ - "C-0226" - ] - } - } - }, - "4": { - "name": "Policies", - "id": "4", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "4.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0246" - ] - }, - "2": { - "name": "Pod Security Policies", - "id": "4.2", - 
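As an illustration of the resource-management controls listed above (C-0270 "Ensure CPU limits are set" and C-0271 "Ensure memory limits are set"), a container with both requests and limits defined might look like the following sketch; the pod name, image and the specific quantities are placeholders to be tuned per workload:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: limited-pod                        # placeholder name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0    # placeholder image
    resources:
      requests:
        cpu: 100m                          # scheduling hint; placeholder value
        memory: 128Mi
      limits:
        cpu: 500m                          # C-0270: CPU limit set
        memory: 256Mi                      # C-0271: memory limit set
```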
"controlsIDs": [ - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0220" - ] - }, - "3": { - "name": "CNI Plugin", - "id": "4.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "name": "Secrets Management", - "id": "4.4", - "controlsIDs": [ - "C-0207", - "C-0234" - ] - }, - "6": { - "name": "General Policies", - "id": "4.6", - "controlsIDs": [ - "C-0209", - "C-0211", - "C-0212" - ] - } - } - }, - "5": { - "name": "Managed services", - "id": "5", - "subSections": { - "1": { - "name": "Image Registry and Image Scanning", - "id": "5.1", - "controlsIDs": [ - "C-0078", - "C-0221", - "C-0222", - "C-0223" - ] - }, - "2": { - "name": "Identity and Access Management (IAM)", - "id": "5.2", - "controlsIDs": [ - "C-0225" - ] - }, - "3": { - "name": "AWS EKS Key Management Service", - "id": "5.3", - "controlsIDs": [ - "C-0066" - ] - }, - "4": { - "name": "Cluster Networking", - "id": "5.4", - "controlsIDs": [ - "C-0227", - "C-0228", - "C-0229", - "C-0230", - "C-0231" - ] - }, - "5": { - "name": "Authentication and Authorization", - "id": "5.5", - "controlsIDs": [ - "C-0232" - ] - }, - "6": { - "name": "Other Cluster Configurations", - "id": "5.6", - "controlsIDs": [ - "C-0233", - "C-0242" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS cluster creation.", - "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", - "long_description": "Kubernetes can store secrets that pods can access via a mounted volume. Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). The only requirement is to enable the encryption provider support during EKS cluster creation.\n\n Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.\n\n Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd.\n\n Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. 
This protects against attackers in the event that they manage to gain access to etcd.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [], - "manual_test": "Using the etcdctl commandline, read that secret out of etcd:\n\n \n```\netcdCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C\n\n```\n where [...] must be the additional arguments for connecting to the etcd server.\n\n Verify the stored secret is prefixed with k8s:enc:aescbc:v1: which indicates the aescbc provider has encrypted the resulting data.", - "references": [ - "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/" - ], - "impact_statement": "", - "default_value": "By default secrets created using the Kubernetes API are stored in *tmpfs* and are encrypted at rest." - }, - { - "name": "CIS-2.1.1 Enable audit Logs", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", - "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", - "long_description": "Audit logs enable visibility into all API server requests from authentic and anonymous sources. Stored log data can be analyzed manually or with tools to identify and understand anomalous or negative activity and lead to intelligent remediations.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.4 Minimize Container Registries to only those approved", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Use approved container registries.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. 
Allowlisting only approved container registries reduces this risk.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [], - "references": [ - "https://aws.amazon.com/blogs/opensource/using-open-policy-agent-on-amazon-eks/" - ], - "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry.", - "default_value": "" - }, - { - "controlID": "C-0167", - "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", - "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AWS EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0171", - "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. Verify that the ownership is set to `root:root`", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the AWS EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0172", - "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the[Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Anonymous Authentication is not enabled. This may be configured as a command line argument to the kubelet service with `--anonymous-auth=false` or in the kubelet configuration file via `\"authentication\": { \"anonymous\": { \"enabled\": false }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with `kubectl` on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Anonymous Authentication is not enabled checking that `\"authentication\": { \"anonymous\": { \"enabled\": false }` is in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0173", - "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. 
Enable explicit authorization.", - "long_description": "Kubelets can be configured to allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. 
Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Webhook Authentication is enabled. This may be enabled as a command line argument to the kubelet service with `--authentication-token-webhook` or in the kubelet configuration file via `\"authentication\": { \"webhook\": { \"enabled\": true } }`.\n\n Verify that the Authorization Mode is set to `WebHook`. This may be set as a command line argument to the kubelet service with `--authorization-mode=Webhook` or in the configuration file via `\"authorization\": { \"mode\": \"Webhook }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Webhook Authentication is enabled with `\"authentication\": { \"webhook\": { \"enabled\": true } }` in the API response.\n\n Verify that the Authorization Mode is set to `WebHook` with `\"authorization\": { \"mode\": \"Webhook }` in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0174", - "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. 
Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that a client certificate authority file is configured. 
This may be configured using a command line argument to the kubelet service with `--client-ca-file` or in the kubelet configuration file via `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that a client certificate authority file is configured with `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"` in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0175", - "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. 
Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `readOnlyPort` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0176", - "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. 
Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/pull/18552" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0177", - "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. 
Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/" - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "See the EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0178", - "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking 
options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the above command includes the argument `--make-iptables-util-chains` then verify it is set to true.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of 
`\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0179", - "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should set up your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", - "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/issues/22063", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "--hostname-override may not take effect when the kubelet also has --cloud-provider aws", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0180", - "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. 
The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0181", - "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not present or is set to true", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"rotateCertificates\": true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-certificates=true\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-certificates` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--rotate-certificates` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `rotateCertificates` argument is not present, or is set to `true`.", - "references": [ - "https://github.com/kubernetes/kubernetes/pull/41912", - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration", - "https://kubernetes.io/docs/imported/release/notes/", - "https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0183", - "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-kubelet-server-certificate` executable argument verify that it is set to true.\n\n If the process does not have the `--rotate-kubelet-server-certificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists in the `featureGates` section and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://github.com/kubernetes/kubernetes/pull/45059", - "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the Amazon EKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service 
account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - "https://aws.github.io/aws-eks-best-practices/iam/#disable-auto-mounting-of-service-account-tokens" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, 
bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://www.impidio.com/blog/kubernetes-rbac-security-pitfalls", - "https://raesene.github.io/blog/2020/12/12/Escalating_Away/", - "https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0205", - "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports network policies.", - "references": [ - "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", - "https://aws.github.io/aws-eks-best-practices/network/" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None.", - "default_value": "This will depend on the CNI plugin in use.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. 
Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", - "https://octetz.com/posts/k8s-network-policy-apis", - "https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "default_value": "By default, network policies are not created.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.6.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Amazon EKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "default_value": "By default, Kubernetes starts with two initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-public` - The namespace for public-readable ConfigMap\n4. `kube-node-lease` - The namespace for associated lease object for each node", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.6.2 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. 
When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.6.3 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0213", - "name": "CIS-4.2.1 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" - ], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0214", - "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0215", - "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0216", - "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this 
should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0217", - "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0218", - "name": "CIS-4.2.6 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0219", - "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined. 
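As a rough sketch only, the remediations for the preceding PSP controls (privileged containers, host namespaces, privilege escalation, root users, added capabilities) could be combined into a single restrictive policy. The policy name is a placeholder and a pre-v1.25 cluster with the PodSecurityPolicy admission plugin is assumed:

```
# Illustrative restrictive PSP; apply it and then grant 'use' on it via RBAC
# only to the service accounts that should be admitted under it.
cat <<'EOF' | kubectl apply -f -
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-example
spec:
  privileged: false
  hostPID: false
  hostIPC: false
  hostNetwork: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities: ["ALL"]
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes: ["configMap", "emptyDir", "projected", "secret", "downwardAPI", "persistentVolumeClaim"]
EOF
```

Note that a PSP only takes effect for a pod once the submitting user or the pod's service account is granted `use` on it through a Role or ClusterRole binding.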
If a PSP is created, 'allowedCapabilities' is set by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0220", - "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0221", - "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", - "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.\n\n 1. Open the Amazon ECR console.\n2. From the navigation bar, choose the Region to create your repository in.\n3. In the navigation pane, choose Repositories.\n4. On the Repositories page, choose the repository that contains the image to scan.\n5. 
On the Images page, select the image to scan and then choose Scan.", - "manual_test": "Please follow AWS ECR or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "If you are utilizing AWS ECR, the following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageError: You may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returned: You may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", - "default_value": "Images are not scanned by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0222", - "name": "CIS-5.1.2 Minimize user access to Amazon ECR", - "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. 
Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
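To make the Action and Resource elements described above concrete, a hypothetical pull-only policy scoped to a single repository might look like the following; the account ID, region, repository name and policy name are placeholders:

```
# Sketch: create an identity-based policy that only allows pulling images
# from one repository (plus the registry-wide auth token call).
cat > ecr-pull-only.json <<'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "ecr:GetDownloadUrlForLayer",
        "ecr:BatchGetImage",
        "ecr:BatchCheckLayerAvailability"
      ],
      "Resource": "arn:aws:ecr:us-east-1:123456789012:repository/my-repo"
    },
    {
      "Effect": "Allow",
      "Action": "ecr:GetAuthorizationToken",
      "Resource": "*"
    }
  ]
}
EOF
aws iam create-policy --policy-name ecr-pull-only --policy-document file://ecr-pull-only.json
```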
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0223", - "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", - "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", - "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0225", - "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. 
Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Auditability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", - "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0226", - "name": "CIS-3.3.1 Prefer using a container-optimized OS when possible", - "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small, secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", - "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like a locked-down firewall are configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", - "remediation": "", - "manual_test": "If a container-optimized OS is required, examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", - "references": [ - "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", - "https://aws.amazon.com/bottlerocket/" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. ssh) may not be possible, with access and debugging being intended via a management tool.", - "default_value": "A container-optimized OS is not the default.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0227", - "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. 
Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. 
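Since the manual test for this control is left empty, a minimal audit sketch is to read the current endpoint configuration back from the cluster; the region and cluster name below are placeholders:

```
# Show whether private/public endpoint access is enabled and which CIDR
# blocks may reach the public endpoint.
aws eks describe-cluster \
  --region us-east-1 \
  --name my-cluster \
  --query 'cluster.resourcesVpcConfig.{private:endpointPrivateAccess,public:endpointPublicAccess,cidrs:publicAccessCidrs}'
```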
For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", - "default_value": "By default, Endpoint Public Access is disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0228", - "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "Check for private endpoint access to the Kubernetes API server", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", - "default_value": "By default, the Public Endpoint is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0229", - "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", - "manual_test": "", - "references": [], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0230", - "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", - "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", - "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", - "remediation": "", - "manual_test": "", - "references": [], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. 
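Because no remediation or manual test is given for the network policy control, a minimal illustration of what enforcement looks like once a policy engine such as Calico is active is a default-deny ingress policy; the namespace is a placeholder:

```
# Illustrative default-deny ingress policy: pods in the namespace reject all
# ingress traffic unless another NetworkPolicy explicitly allows it.
cat <<'EOF' | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: my-namespace
spec:
  podSelector: {}
  policyTypes:
    - Ingress
EOF
```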
This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0231", - "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0232", - "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", - "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", - "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", - "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", - "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
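A quick, non-authoritative way to review which IAM principals have been mapped into Kubernetes RBAC on an EKS cluster is to inspect the aws-auth ConfigMap; this is only an audit sketch and assumes the default EKS authentication setup:

```
# List the IAM role and user mappings that grant cluster access.
kubectl -n kube-system get configmap aws-auth -o yaml
```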
If they are not, they will not be able to access the namespace or deploy.", - "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0233", - "name": "CIS-5.6.1 Consider Fargate for running untrusted workloads", - "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", - "long_description": "", - "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console.\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * For Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. 
On the Review and create page, review the information for your Fargate profile and choose Create.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "", - "default_value": "By default, AWS Fargate is not utilized.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0234", - "name": "CIS-4.4.2 Consider external secret storage", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "manual_test": "Review your secrets management implementation.", - "references": [], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0235", - "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified, you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. 
Verify that the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0238", - "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0242", - "name": "CIS-5.6.2 Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0246", - "name": "CIS-4.1.7 Avoid use of system:masters group", - "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. 
bootstrapping access prior to RBAC being fully available)", - "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", - "remediation": "Remove the `system:masters` group from all users in the cluster.", - "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", - "references": [ - "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", - "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0066", - "C-0067", - "C-0078", - "C-0167", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0183", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0205", - "C-0206", - "C-0207", - "C-0209", - "C-0211", - "C-0212", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0220", - "C-0221", - "C-0222", - "C-0223", - "C-0225", - "C-0226", - "C-0227", - "C-0228", - "C-0229", - "C-0230", - "C-0231", - "C-0232", - "C-0233", - "C-0234", - "C-0235", - "C-0238", - "C-0242", - "C-0246" - ] - }, - { - "name": "cis-aks-t1.2.0", - "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", - "attributes": { - "armoBuiltin": true, - "version": "v1.2.0" - }, - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "2": { - "name": "Master (Control Plane) Configuration", - "id": "2", - "subSections": { - "1": { - "name": "Logging", - "id": "2.1", - "controlsIDs": [ - "C-0254" - ] - } - } - }, - "3": { - "name": "Worker Nodes", - "id": "3", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "3.1", - "controlsIDs": [ - "C-0167", - "C-0171", - "C-0235", - "C-0238" - ] - }, - "2": { - "name": "Kubelet", - "id": "3.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0182", - "C-0183" - ] - } - } - }, - "4": { - "name": "Policies", - "id": "4", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "4.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190" - ] - }, - "2": { - "name": "Pod Security Standards", - "id": "4.2", - "controlsIDs": [ - "C-0201", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219" - ] - }, - "3": { - 
"name": "Azure Policy / OPA", - "id": "4.3", - "controlsIDs": [] - }, - "4": { - "name": "CNI Plugin", - "id": "4.4", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "5": { - "name": "Secrets Management", - "id": "4.5", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "6": { - "name": "Extensible Admission Control", - "id": "4.6", - "controlsIDs": [] - }, - "7": { - "name": "General Policies", - "id": "4.7", - "controlsIDs": [ - "C-0209", - "C-0211", - "C-0212" - ] - } - } - }, - "5": { - "name": "Managed services", - "id": "5", - "subSections": { - "1": { - "name": "Image Registry and Image Scanning", - "id": "5.1", - "controlsIDs": [ - "C-0078", - "C-0243", - "C-0250", - "C-0251" - ] - }, - "2": { - "name": "Access and identity options for Azure Kubernetes Service (AKS)", - "id": "5.2", - "controlsIDs": [ - "C-0239", - "C-0241" - ] - }, - "3": { - "name": "Key Management Service (KMS)", - "id": "5.3", - "controlsIDs": [ - "C-0244" - ] - }, - "4": { - "name": "Cluster Networking", - "id": "5.4", - "controlsIDs": [ - "C-0240", - "C-0245", - "C-0247", - "C-0248", - "C-0252" - ] - }, - "5": { - "name": "Authentication and Authorization", - "id": "5.5", - "controlsIDs": [ - "C-0088" - ] - }, - "6": { - "name": "Other Cluster Configurations", - "id": "5.6", - "controlsIDs": [ - "C-0242", - "C-0249" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "name": "CIS-5.1.4 Minimize Container Registries to only those approved", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Use approved container registries.", - "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", - "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [], - "references": [ - "\n\n \n\n " - ], - "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry." - }, - { - "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. 
You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [], - "references": [ - "\n\n " - ] - }, - { - "controlID": "C-0167", - "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", - "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0171", - "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. Verify that the ownership is set to `root:root`", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0172", - "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n First, SSH to the relevant 
node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": { \"anonymous\": { \"enabled\": false }` argument is set to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"anonymous\":{\"enabled\":false}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0173", - "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\"... 
\"webhook\":{\"enabled\":true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"authentication\": \"webhook\": \"enabled\"` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": {\"webhook\": { \"enabled\": is set to true`.\n\n If the `\"authentication\": {\"mode\": {` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `\"authentication\": {\"mode\": {` to something other than `AlwaysAllow`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"webhook\":{\"enabled\":true}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0174", - "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry 
for `\"x509\": {\"clientCAFile:\"` set to the location of the client certificate authority file.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"x509\": {\"clientCAFile:\"` argument exists and is set to the location of the client certificate authority file.\n\n If the `\"x509\": {\"clientCAFile:\"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `\"authentication\": { \"x509\": {\"clientCAFile:\"` to the location of the client certificate authority file.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication.. x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0175", - "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. 
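For illustration, a check along these lines can be sketched in Rego, the rule language used throughout this library. The `input.kubeletConfig` document below is an assumption made for the example, not the control's actual rule:

```
package armo_builtins

# Rough sketch only: flag a kubelet configuration that leaves the
# read-only port open. The `input.kubeletConfig` document is an
# assumed input shape for illustration, not the control's real rule.
deny[msg] {
	cfg := input.kubeletConfig
	cfg.readOnlyPort != 0
	msg := sprintf("kubelet read-only port is enabled on port %v", [cfg.readOnlyPort])
}
```

A complete rule would also need to handle an absent `readOnlyPort` field according to the platform default.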
Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set `readOnlyPort` to 0\n\n \n```\nreadOnlyPort: 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `readOnlyPort` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0176", - "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. 
Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0177", - "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. 
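A hedged Rego sketch of this check can be written against the kubelet command line. The per-node `input.nodes[_].kubeletCmdLine` field below is hypothetical and used only for illustration; a fuller rule would also inspect the kubelet config file, since the flag may be set there instead:

```
package armo_builtins

# Rough sketch only: flag nodes whose kubelet command line does not pass
# --protect-kernel-defaults=true. The `input.nodes[_]` shape with a
# `kubeletCmdLine` string is an assumed input for illustration.
deny[msg] {
	node := input.nodes[_]
	not contains(node.kubeletCmdLine, "--protect-kernel-defaults=true")
	msg := sprintf("kubelet on node %v does not protect kernel defaults", [node.name])
}
```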
Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0178", - "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. 
It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"makeIPTablesUtilChains\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0179", - "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", - "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 3, - "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", - "default_value": "See the Azure AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0180", - "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "See the AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0182", - "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", - "references": [ - "\n\n \n\n \n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0183", - "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AKS documentation for the default value.", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used 
where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
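As a rough sketch of the RBAC review this control calls for, the following Rego flags Role and ClusterRole objects that grant read verbs on secrets. The flat `input[_]` list of RBAC objects is an assumption for the example, not the control's actual rule; field names follow the standard rbac.authorization.k8s.io schema:

```
package armo_builtins

# Rough sketch only: flag Role/ClusterRole objects that grant read
# access to secrets. The flat `input[_]` list of RBAC objects is an
# assumed input shape for illustration.
read_verbs := {"get", "list", "watch"}

deny[msg] {
	role := input[_]
	{"Role", "ClusterRole"}[role.kind]
	rule := role.rules[_]
	rule.resources[_] == "secrets"
	verb := rule.verbs[_]
	read_verbs[verb]
	msg := sprintf("%v %v grants %v on secrets", [role.kind, role.metadata.name, verb])
}
```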
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
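A comparable sketch, under the same assumed input shape, can flag wildcard grants:

```
package armo_builtins

# Rough sketch only: flag Role/ClusterRole objects whose rules use the
# "*" wildcard for apiGroups, resources, or verbs. The flat `input[_]`
# list of RBAC objects is an assumed input shape for illustration.
deny[msg] {
	role := input[_]
	{"Role", "ClusterRole"}[role.kind]
	rule := role.rules[_]
	has_wildcard(rule)
	msg := sprintf("%v %v uses a wildcard in one of its rules", [role.kind, role.metadata.name])
}

has_wildcard(rule) { rule.apiGroups[_] == "*" }
has_wildcard(rule) { rule.resources[_] == "*" }
has_wildcard(rule) { rule.verbs[_] == "*" }
```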
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to 
that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0201", - "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0205", - "name": "CIS-4.4.1 Ensure latest CNI version is used", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", - "manual_test": "Ensure CNI plugin supports network policies.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None.", - "default_value": "This will depend on the CNI plugin in use.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic", - "default_value": "By default, network policies are not created.", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.5.2 Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 5, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Azure AKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "\n\n \n\n \n\n ." - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "default_value": "When you create an AKS cluster, the following namespaces are available:\n\n NAMESPACES\nNamespace Description\ndefault Where pods and deployments are created by default when none is provided. In smaller environments, you can deploy applications directly into the default namespace without creating additional logical separations. When you interact with the Kubernetes API, such as with kubectl get pods, the default namespace is used when none is specified.\nkube-system Where core resources exist, such as network features like DNS and proxy, or the Kubernetes dashboard. You typically don't deploy your own applications into this namespace.\nkube-public Typically not used, but can be used for resources to be visible across the whole cluster, and can be viewed by any user.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. 
When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. 
It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.7.3 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get all -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0213", - "name": "CIS-4.2.1 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n as an alternative AZ CLI can be used:\n\n \n```\naz aks list --output yaml\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an AKS cluster, the value of \"enablePodSecurityPolicy\" is null.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0214", - "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0215", - "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement 
to run containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0216", - "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0217", - "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0218", - "name": "CIS-4.2.6 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0219", - "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0235", - "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0238", - "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. 
You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0239", - "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. 
The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. Webhook token authentication is configured and managed as part of the AKS cluster.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0240", - "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", - "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", - "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0241", - "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", - "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", - "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources. With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", - "remediation": "Set Azure RBAC as the access system.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0242", - "name": "CIS-5.6.2 Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0243", - "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. 
This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. Or, a network issue might prevent you from connecting to the registry.", - "default_value": "Images are not scanned by Default.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0244", - "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", - "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0245", - "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0247", - "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. 
You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private API server endpoint; they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider using Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", - "default_value": "By default, Endpoint Private Access is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0248", - "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0249", - "name": "CIS-5.6.1 Restrict untrusted workloads", - "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", - "long_description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "actionRequired": "manual review" - }, - "baseScore": 5, - "impact_statement": "", - "default_value": "ACI is not a default component of the AKS", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0250", - "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", - "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. 
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0251", - "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", - "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", - "manual_test": "", - "references": [ - "" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0252", - "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": {}, - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0254", - "name": "CIS-2.1.1 Enable audit Logs", - "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", - "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", - "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. 
All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", - "default_value": "By default, cluster control plane logs aren't sent to be Logged.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0078", - "C-0088", - "C-0167", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0182", - "C-0183", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0201", - "C-0205", - "C-0206", - "C-0207", - "C-0208", - "C-0209", - "C-0211", - "C-0212", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0235", - "C-0238", - "C-0239", - "C-0240", - "C-0241", - "C-0242", - "C-0243", - "C-0244", - "C-0245", - "C-0247", - "C-0248", - "C-0249", - "C-0250", - "C-0251", - "C-0252", - "C-0254" - ] - }, - { - "name": "ArmoBest", - "description": "", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the pods running as root or that can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and escalate its privileges to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": "Check that the allowPrivilegeEscalation field in the securityContext of the container is set to false.", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "A mutable container filesystem can be abused to inject malicious code or data into containers. Use an immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If the container's application needs to write into the filesystem, it is recommended to mount secondary filesystems for the specific directories where the application requires write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container can create files and download scripts as they wish, and modify the underlying application running on the container.", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
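A minimal sketch of a pod spec that would satisfy the three workload checks above (C-0013, C-0016, C-0017); the pod name, image and mount path are placeholders, not values taken from the controls:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hardened-app                          # placeholder name
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
  containers:
    - name: app
      image: registry.example.com/app:1.0     # placeholder image
      securityContext:
        allowPrivilegeEscalation: false
        readOnlyRootFilesystem: true
      volumeMounts:
        - name: tmp
          mountPath: /tmp                     # writable scratch dir instead of a mutable root filesystem
  volumes:
    - name: tmp
      emptyDir: {}
```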
", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. 
Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. 
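The host-isolation checks above (C-0038 hostPID/hostIPC, C-0041 hostNetwork, and the hostPort control that follows) all pass when the host-level fields are absent or false. A hedged example with placeholder names:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: isolated-app                          # placeholder name
spec:
  hostPID: false                              # false is the default; omitting the field is equivalent
  hostIPC: false
  hostNetwork: false
  containers:
    - name: app
      image: registry.example.com/app:1.0     # placeholder image
      ports:
        - containerPort: 8080                 # expose via a Service (ClusterIP/NodePort) rather than hostPort
```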
It may prevent the second object from starting, even if Kubernetes tries to reschedule it on another node, provided there are available nodes with a sufficient amount of resources. Also, if the number of replicas of such a workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid using hostPort unless it is absolutely necessary, in which case define an appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "This control checks for workloads (like Pod, Deployment, etc.) that contain a container with hostPort. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.", - "test": "Check for each workload (with containers) whether hostPort is defined in any of its containers.", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities to a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html).", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Network mapping", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. 
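For C-0046, the common remediation pattern is to drop every capability and add back only what the workload demonstrably needs. A sketch (placeholder names; the added capability is just an example for a process that binds a privileged port):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: least-capabilities                    # placeholder name
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0     # placeholder image
      securityContext:
        capabilities:
          drop: ["ALL"]
          add: ["NET_BIND_SERVICE"]           # add back only what is strictly required
```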
Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
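C-0055 passes when at least one of the hardening mechanisms it lists is declared on the pod or container. A minimal sketch using the RuntimeDefault seccomp profile plus an AppArmor annotation (the annotation form shown here predates the native appArmorProfile field; all names are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hardened-runtime                      # placeholder name
  annotations:
    container.apparmor.security.beta.kubernetes.io/app: runtime/default
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  containers:
    - name: app
      image: registry.example.com/app:1.0     # placeholder image
```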
Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Pods in default namespace", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", - "remediation": "Create necessary namespaces and move all the pods from default namespace there.", - "long_description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. 
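For C-0059, the mitigation the control verifies is the `allow-snippet-annotations` key in the ingress-nginx ConfigMap. The object name and namespace below are the common Helm-chart defaults and may differ in a given installation:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller    # name used by the default Helm chart; may differ
  namespace: ingress-nginx          # may differ
data:
  allow-snippet-annotations: "false"
```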
This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the pods running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have the sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have the sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers with the relevant RBAC permission can use the \u201ckubectl port-forward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit the \u201ckubectl port-forward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have the relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl port-forward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to port-forward into pods \u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "No impersonation", - "attributes": { - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in production environments for daily operations. 
This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
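For C-0066 on self-managed clusters, the encryption that the control looks for is configured through an EncryptionConfiguration file referenced by the API server's `--encryption-provider-config` flag. A minimal sketch; the key name and key material are placeholders:

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1                                  # placeholder key name
              secret: "<base64-encoded 32-byte key>"      # placeholder key material
      - identity: {}                                      # allows reading not-yet-encrypted data
```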
It is important to use it so the operator has a record of the events that happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSP enables fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates, and they extend authorization beyond RBAC. It is important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification; the Kubelet must be configured with a client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. 
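C-0069 and C-0070 can be satisfied either with the kubelet flags named in the remediations or with the equivalent fields in the kubelet configuration file. A sketch of the file-based form; the CA path is an example:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false                              # C-0069: reject anonymous requests
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt    # C-0070: verify API server client certificates (example path)
authorization:
  mode: Webhook
```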
This is done through client certificate verification; the Kubelet must be configured with a client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows the user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point Kubernetes to a compromised container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. Users should list all the approved repositories in the parameters of this control so that any potentially dangerous image can be identified.", - "test": "Checks if the image is from an allow-listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking the Linux kernel version of the Node objects; if it is at or above 5.1 and below 5.16.2, it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9, v2.2.4 or v2.3.0)", - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. 
Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movement and information disclosure.", - "test": "Checking the Argo CD version to see if it is a vulnerable version (earlier than v2.1.9, v2.2.4 or v2.3.0 in its release line)", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling an attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", - "test": "Checking the containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-3172-aggregated-API-server-redirect", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [] - }, - "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patches): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. 
This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", - "controlID": "C-0089", - "baseScore": 3.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "controlTypeTags": [ - "security" - ] - }, - "description": "CVE-2022-47633 is a high-severity vulnerability in Kyverno; it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Kyverno to 1.8.5 or above", - "long_description": "CVE-2022-47633 is a high-severity vulnerability in Kyverno; it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0236", - "name": "Verify image signature", - "description": "Verifies the signature of each image with given public keys", - "long_description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "manual_test": "", - "references": [], - "attributes": { - "actionRequired": "configuration" - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0237", - "name": "Check if signature exists", - "description": "Ensures that all images contain some signature", - "long_description": "Verifies that each image is signed", - "remediation": "Replace the image with a signed image", - "manual_test": "", - "references": [], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - 
} - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0005", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0030", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0046", - "C-0049", - "C-0054", - "C-0055", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0078", - "C-0079", - "C-0081", - "C-0087", - "C-0089", - "C-0091", - "C-0236", - "C-0237", - "C-0270", - "C-0271" - ] - } -] \ No newline at end of file diff --git a/releaseDev/mitre.json b/releaseDev/mitre.json deleted file mode 100644 index 06fca2d7d..000000000 --- a/releaseDev/mitre.json +++ /dev/null @@ -1,2112 +0,0 @@ -{ - "name": "MITRE", - "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Roles with delete capabilities", - "attributes": { - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy 
data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-excessive-delete-rights-v1", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind 
== subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-access-dashboard-subject-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" - }, - { - "name": "rule-access-dashboard-wl-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, 
\"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. 
", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Mount service principal", - "attributes": { - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
This control determines if any workload contains a volume with potential access to cloud credential.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mount to known cloud credentials folders or files .", - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-mount-potential-credentials-paths", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "relevantCloudProviders": [ - "EKS", - "GKE", - "AKS" - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tstart_of_path := volumes_data[\"start_of_path\"]\n result := is_unsafe_paths(volume, start_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"start_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"start_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# 
is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, start_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [start_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" - } - ] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. 
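A minimal sketch of the exposure pattern control C-0021 matches, with invented names: a LoadBalancer (or NodePort) Service whose selector points at a workload whose name contains one of the configured sensitive interfaces (here assumed to be a Deployment named kubeflow-dashboard whose pods carry the label app: kubeflow).

# Hypothetical example: combined with a workload named "...kubeflow..." labelled
# app: kubeflow, this Service would cause C-0021 to report the exposure.
apiVersion: v1
kind: Service
metadata:
  name: kubeflow-ui            # invented name
spec:
  type: LoadBalancer           # NodePort is flagged as well
  selector:
    app: kubeflow              # must match the workload's pod labels
  ports:
  - port: 80
    targetPort: 8080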
Needs to add user config", - "controlID": "C-0021", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "exposed-sensitive-interfaces-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.sensitiveInterfaces" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveInterfaces", - "name": "Sensitive interfaces", - "description": "List of known software interfaces that should not generally be exposed to the Internet." - } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload 
connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" - } - ] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. 
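Since rule-deny-cronjobs simply lists every CronJob for review, any CronJob manifest illustrates control C-0026; the one below uses invented names and a harmless command.

# Hypothetical example: C-0026 lists this CronJob so an operator can confirm it is expected.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: nightly-report         # invented name
spec:
  schedule: "0 2 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: report
            image: busybox
            command: ["sh", "-c", "echo generating report"]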
This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rule-deny-cronjobs", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob" - }, - "ruleLanguage": "rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if it's cronjob", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# alert cronjobs\n\n# handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
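For illustration of control C-0031, a Role that grants the delete verbs the rule looks for; the names are invented, and any subject bound to such a Role would be reported.

# Hypothetical example: subjects bound to this Role would be reported by C-0031.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: event-cleaner          # invented name
  namespace: default
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["delete", "deletecollection"]   # "*" would match as well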
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events --all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-delete-k8s-events-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource) can take advantage of their privileges for 
malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Validate admission controller (validating)", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-validating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Validate admission controller" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns validating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CoreDNS poisoning", - "attributes": { - "microsoftMitreColumns": [ - "Lateral Movement" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", - "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", - "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. 
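As a sketch of what control C-0037 reports, assuming an invented role name, a Role in kube-system that permits editing the coredns ConfigMap; subjects bound to it are what the control lists.

# Hypothetical example: subjects bound to this Role would be reported by C-0037.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: coredns-editor         # invented name
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["coredns"]   # per the rule, configmaps rules without resourceNames match too
  verbs: ["update", "patch"]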
If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", - "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", - "controlID": "C-0037", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-update-configmap-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", 
\"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Validate admission controller (mutating)", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-mutating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Validate admission controller" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns mutating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "SSH server running inside container", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. 
This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", - "controlID": "C-0042", - "baseScore": 3.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-ssh-to-pod-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := 
input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": 
wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n}" - } - ] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = 
sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" - } - ] - }, - { - "name": "Instance Metadata API", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "instance-metadata-api-access", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "cloudProviderInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Checks if there is access from the nodes to cloud prividers instance metadata services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" - } - ] - }, - { - "name": "Access container service account", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. 
Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "access-container-service-account-v1", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace 
== subject.namespace\n}" - } - ] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. 
This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policies and Seccomp, and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true; if so, raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments are defined as privileged", - "remediation": "avoid defining pods as privileged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n 
}\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - } - ] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - 
"controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - } - ] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading 
the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0007", - "C-0012", - "C-0014", - "C-0015", - "C-0020", - "C-0021", - "C-0026", - "C-0031", - "C-0035", - "C-0036", - "C-0037", - "C-0039", - "C-0042", - "C-0045", - "C-0048", - "C-0052", - "C-0053", - "C-0054", - "C-0057", - "C-0058", - "C-0059", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070" - ] -} \ No newline at end of file diff --git a/releaseDev/nsa.json b/releaseDev/nsa.json deleted file mode 100644 index 2c1a47229..000000000 --- a/releaseDev/nsa.json +++ /dev/null @@ -1,2096 +0,0 @@ -{ - "name": "NSA", - "description": "Implement NSA security advices for K8s ", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to 
gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "insecure-port-flag", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. 
This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - } - ] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = 
[]\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", 
[container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - } - ] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ingress-and-egress-blocked", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, 
networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" - } - ] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, 
metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource) can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply the least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in, highly privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other highly privileged roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := 
array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. 
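As a hedged sketch of the remediation for this control (not taken from the rule library), a compliant pod spec simply omits hostPID and hostIPC or sets them to false; the name and image below are placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: isolated-pod                       # hypothetical name
spec:
  hostPID: false                           # default value; the field may also be omitted entirely
  hostIPC: false                           # default value; the field may also be omitted entirely
  containers:
    - name: app
      image: registry.example/app:latest   # placeholder image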
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": 
[path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := 
wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. 
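For context, and only as an illustrative sketch (the actual blacklist is taken from settings.postureControlInputs.insecureCapabilities at scan time), a container securityContext that avoids the pattern flagged by this rule could look like the following; names and the image are placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: restricted-pod                     # hypothetical name
spec:
  containers:
    - name: app
      image: registry.example/app:latest   # placeholder image
      securityContext:
        capabilities:
          drop: ["ALL"]
          add: ["NET_BIND_SERVICE"]        # add back only what the workload strictly needs

Dropping everything and adding back individual capabilities keeps the added list short and easy to compare against the configured blacklist.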
You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." - } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "Cluster internal networking", - "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
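An illustrative, non-authoritative example of the kind of per-namespace NetworkPolicy whose absence this control reports; the name and namespace are placeholders:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all                   # hypothetical name
  namespace: my-namespace                  # hypothetical namespace
spec:
  podSelector: {}                          # empty selector matches every pod in the namespace
  policyTypes:
    - Ingress
    - Egress

Because the empty podSelector matches all pods, a single policy like this gives every pod in the namespace a default-deny baseline that more specific allow rules can then relax.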
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
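A minimal sketch of a pod that would satisfy this check, assuming a pod-level seccomp profile plus dropped capabilities is used (any one of AppArmor, seccomp, SELinux or capabilities is accepted); names and the image are hypothetical:

apiVersion: v1
kind: Pod
metadata:
  name: hardened-pod                       # hypothetical name
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault                 # pod-level seccomp profile
  containers:
    - name: app
      image: registry.example/app:latest   # placeholder image
      securityContext:
        capabilities:
          drop: ["ALL"]                    # container-level capability hardening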
If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "linux-hardening", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = 
containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": 
path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
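For orientation only, the volumeMount pattern that the rule below reports on vulnerable kubelet versions looks roughly like this illustrative manifest (all names are hypothetical); removing the subPath/subPathExpr usage, or upgrading the node, addresses the finding:

apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo                       # hypothetical name
spec:
  containers:
    - name: app
      image: registry.example/app:latest   # placeholder image
      volumeMounts:
        - name: data
          mountPath: /var/www
          subPath: html                    # subPath / subPathExpr usage is what gets flagged
  volumes:
    - name: data
      emptyDir: {}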
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - } - ] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - 
"controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - } - ] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive pods in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading 
the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-cpu-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU limits are not set.", - "remediation": "Ensure CPU limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": 
sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory limits are not set.", - "remediation": "Ensure memory limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", 
\"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0005", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0030", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0046", - "C-0054", - "C-0055", - "C-0057", - "C-0058", - "C-0059", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0270", - "C-0271" - ] -} \ No newline at end of file diff --git a/releaseDev/rules.json b/releaseDev/rules.json deleted file mode 100644 index 0535100f8..000000000 --- a/releaseDev/rules.json +++ /dev/null @@ -1,8856 +0,0 @@ -[ - { - "name": "outdated-k8s-version", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\tnode := input[_]\n\tnode.kind == \"Node\"\n\tcurrent_version := node.status.nodeInfo.kubeletVersion\n has_outdated_version(current_version)\n\tpath := \"status.nodeInfo.kubeletVersion\"\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Your kubelet version: %s, in node: %s is outdated\", [current_version, node.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [node]},\n\t}\n}\n\n\nhas_outdated_version(version) {\n\t# the `supported_k8s_versions` is validated in the validations script against \"https://api.github.com/repos/kubernetes/kubernetes/releases\"\n supported_k8s_versions := [\"v1.29\", \"v1.28\", \"v1.27\"] \n\tevery v in supported_k8s_versions{\n\t\tnot startswith(version, v)\n\t}\n}\n" - }, - { - "name": "ensure-aws-policies-are-present", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "fails if aws policies are not found", - "remediation": "Implement policies to minimize user access to Amazon ECR", - "ruleQuery": "armo_builtins", - "rule": "package 
armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has not policies to minimize access to Amazon ECR; Add some policy in order to minimize access on it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n" - }, - { - "name": "kubelet-hostname-override", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" - }, - { - "name": "exposed-critical-pods", - "attributes": { - "m$K8sThreatMatrix": "exposed-critical-pods", - "imageScanRelated": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service", - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "description": "Fails if pods have exposed services as well as critical vulnerabilities", - "remediation": "The image of the listed pods might have a fix in a newer version. 
Alternatively, the pod service might not need to be external facing", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n\n container.image == vuln.metadata.name\n\n # At least one critical vulnerabilities\n filter_critical_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_critical_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.severity == \"Critical\"\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", - "resourceEnumerator": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n 
service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}" - }, - { - "name": "rule-manual", - "attributes": { - "actionRequired": "manual review", - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", - "remediation": "", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" - }, - { - "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable certificate based kubelet authentication.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
\"kube-apiserver\")\n}\n" - }, - { - "name": "resources-memory-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory limits are not set.", - "remediation": "Ensure memory limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact 
Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "configmap-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "podtemplate-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], 
- "apiVersions": [ - "v1" - ], - "resources": [ - "PodTemplate" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "external-secret-storage", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" - }, - { - "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", - "attributes": { - "useFromKubescapeVersion": "v2.2.5" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\t# node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy 
in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := [\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n" - }, - { - "name": "rule-excessive-delete-rights-v1", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "host-pid-ipc-privileges", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - }, - { - "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "kubelet-strong-cryptographics-ciphers", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", - "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic 
ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [\"TLSCipherSuites\"],\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "workload-mounted-secrets", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Secret" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Set global request timeout for API server requests as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n" - }, - { - "name": "ensure-external-secrets-storage-is-in-use", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "relevantCloudProviders": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that doesn't support external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resources\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for 
\"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n" - }, - { - "name": "CVE-2022-47633", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Limit the rate at which the API server accepts requests.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "workload-with-cluster-takeover-roles", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has cluster takeover roles\n role := input[_]\n role.kind in [\"Role\", 
\"ClusterRole\"]\n is_takeover_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has cluster takeover roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n \"object\": rolebinding,\n\t\t \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\n# look for rule allowing create/update workloads\nis_takeover_role(role){\n takeover_resources := [\"pods\", \"*\"]\n takeover_verbs := [\"create\", \"update\", \"patch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}\n\n# look for rule allowing secret access\nis_takeover_role(role){\n rule := role.rules[i]\n takeover_resources := [\"secrets\", \"*\"]\n takeover_verbs := [\"get\", \"list\", \"watch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}", - "resourceEnumerator": "package 
armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" - }, - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - }, - { - "name": "instance-metadata-api-access", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "cloudProviderInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Checks if there is access from the nodes to cloud prividers instance metadata services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": 
\"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" - }, - { - "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` it not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": 
"package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - }, - { - "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "kubelet-rotate-kubelet-server-certificate", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n" - }, - { - "name": "pods-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - 
], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "CVE-2022-23648", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n 
containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" - }, - { - "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "remediation": "Enable Azure Defender image scanning. 
Command: az aks update --enable-defender --resource-group --name ", - "ruleQuery": "armo_builtin", - "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n" - }, - { - "name": "container-image-repository-v1", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useFromKubescapeVersion": "v2.9.0" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" - }, - { - "name": "psp-deny-hostipc", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "rule-can-ssh-to-pod-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - 
"apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# input: 
pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" - }, - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useUntilKubescapeVersion": "v2.3.8" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container 
images are from repositories explicitly allowed in this list." - } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - }, - { - "name": "psp-required-drop-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport 
future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return al the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n" - }, - { - "name": "set-seccomp-profile", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "ruleDependencies": [], - "relevantCloudProviders": [ - "EKS" - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := \"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := \"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := \tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n" - }, - { - "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": 
[],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - }, - { - "name": "alert-mount-potential-credentials-paths", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "relevantCloudProviders": [ - "EKS", - "GKE", - "AKS" - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tstart_of_path := volumes_data[\"start_of_path\"]\n result := is_unsafe_paths(volume, start_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"start_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"start_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, start_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [start_of_path, i])\n}\n\n\n# fix_path - 
adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" - }, - { - "name": "service-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": 
result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" - }, - { - "name": "sudo-in-container-entrypoint", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := 
wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, start_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" - }, - { - "name": "container-hostPort", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 
10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - }, - { - "name": "replicationcontroller-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ReplicationController" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "kubelet-protect-kernel-defaults", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the --protect-kernel-defaults argument is set to true.", - "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Reject creating objects in a namespace that is undergoing termination.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "namespace-without-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := 
input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "exposure-to-internet", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n\t\t \"reviewPaths\": failPath,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n\n # Make sure that they belong to the same namespace\n svc.metadata.namespace == ingress.metadata.namespace\n\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t\t{\n\t \"object\": ingress,\n\t\t \"reviewPaths\": result,\n\t \"failedPaths\": result,\n\t },\n\t\t{\n\t \"object\": svc,\n\t\t}\n ]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == 
wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" - }, - { - "name": "resources-cpu-limit-and-request", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.cpu_request_max", - "settings.postureControlInputs.cpu_request_min", - "settings.postureControlInputs.cpu_limit_min", - "settings.postureControlInputs.cpu_limit_max" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.cpu_request_max", - "name": "cpu_request_max", - "description": "Ensure a CPU resource request is set and is under this defined maximum value." - }, - { - "path": "settings.postureControlInputs.cpu_request_min", - "name": "cpu_request_min", - "description": "Ensure a CPU resource request is set and is above this defined minimum value." - }, - { - "path": "settings.postureControlInputs.cpu_limit_max", - "name": "cpu_limit_max", - "description": "Ensure a CPU resource limit is set and is under this defined maximum value." - }, - { - "path": "settings.postureControlInputs.cpu_limit_min", - "name": "cpu_limit_min", - "description": "Ensure a CPU resource limit is set and is above this defined minimum value." 
- } - ], - "description": "CPU limits and requests are not set.", - "remediation": "Ensure CPU limits and requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ==================================== no CPU requests =============================================\n# Fails if pod does not have container with CPU request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU requests\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU requests\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n# ============================================= cpu limits exceed min/max =============================================\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n \tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# ============================================= cpu requests exceed min/max =============================================\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################################################\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resources.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) 
{\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tto_number(given) > to_number(max)\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tto_number(given) < to_number(min)\n\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" - }, - { - "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := 
object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "review-roles-with-aws-iam-authenticator", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n" - }, - { - "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "CVE-2022-3172", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apiregistration.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "APIService" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "apiserverinfo.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", - "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n" - }, - { - "name": "image-pull-policy-is-not-set-to-always", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - 
"v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "check imagePullPolicy filed, if imagePullPolicy = always pass, else fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n 
v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" - }, - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny addmission controller is not enabled. This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "psp-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - 
"apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - }, - { - "name": "psp-deny-root-container", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" - }, - { - "name": "etcd-peer-tls-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "linux-hardening", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - }, - { - "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable anonymous requests to the API server.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "kubelet-event-qps", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Retain the logs for at least 30 days or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, \"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-access-dashboard-subject-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": 
"v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw 
permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), 
format_int(k, 10)])\n fix_path = \"\"\n}" - }, - { - "name": "rule-cni-enabled-aks", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n" - }, - { - "name": "host-network-access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - }, - { - "name": "rule-identify-blocklisted-image-registries", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "useUntilKubescapeVersion": "v2.3.8" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.publicRegistries", - "settings.postureControlInputs.untrustedRegistries" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.publicRegistries", - "name": "Public registries", - "description": "Kubescape checks none of these public container registries are in use." - }, - { - "path": "settings.postureControlInputs.untrustedRegistries", - "name": "Registries block list", - "description": "Kubescape checks none of these user-provided container registries are in use." 
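Both registry block-list rules consume these inputs through `data.postureControlInputs`; a condensed sketch of the match performed by the newer (v1) variant, where an image is flagged when its normalized name starts with any configured registry (the sketch's package name is illustrative):

```
package example

# Sketch of how the block-list inputs are consumed: an image is flagged
# when it starts with any entry from either configured registry list.
untrusted_or_public_registry(image) {
	registry := data.postureControlInputs.untrustedRegistries[_]
	startswith(image, registry)
}

untrusted_or_public_registry(image) {
	registry := data.postureControlInputs.publicRegistries[_]
	startswith(image, registry)
}
```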
- } - ], - "description": "Identifying if pod container images are from unallowed registries", - "remediation": "Use images from safe registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\tregistry := untrusted_registries[_]\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\tregistry := public_registries[_]\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) = result {\n not contains(image, \"/\")\n result := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - }, - { - "name": "rule-identify-blocklisted-image-registries-v1", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - 
"useFromKubescapeVersion": "v2.9.0" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.publicRegistries", - "settings.postureControlInputs.untrustedRegistries" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.publicRegistries", - "name": "Public registries", - "description": "Kubescape checks none of these public container registries are in use." - }, - { - "path": "settings.postureControlInputs.untrustedRegistries", - "name": "Registries block list", - "description": "Kubescape checks none of these user-provided container registries are in use." - } - ], - "description": "Identifying if pod container images are from unallowed registries", - "remediation": "Use images from safe registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tuntrusted_or_public_registries(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\tregistry := untrusted_registries[_]\n\tstartswith(image, registry)\n\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\tregistry := public_registries[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}" - }, - { - "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not allow all requests.", - "remediation": "Edit the API server pod 
specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "horizontalpodautoscaler-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - "v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "rule-secrets-in-env-var", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "psp-deny-privileged-container", - 
"attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\twanted = 
[\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": 
"Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "ensure-azure-rbac-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", - "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. 
Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled check if Azure RBAC is enabled into ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n" - }, - { - "name": "set-procmount-default", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := 
input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" - }, - { - "name": "exposed-rce-pods", - "attributes": { - "m$K8sThreatMatrix": "exposed-rce-pods", - "useFromKubescapeVersion": "v2.0.150", - "imageScanRelated": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service", - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "description": "fails if known pods have exposed services and known vulnerabilities with remote code execution", - "remediation": "The image of the listed pods might have a fix in a newer version. 
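The set-procmount-default rule above only evaluates workloads when the API server enables the ProcMountType feature gate; a condensed sketch of that guard (package name illustrative):

```
package example

# The procMount checks are gated on the API server command line: they run
# only when the ProcMountType feature gate is switched on.
is_proc_mount_type_enabled(command) {
	contains(command, "--feature-gates=")
	args := regex.split(` +`, command)
	some i
	regex.match(`ProcMountType=true`, args[i])
}
```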
Alternatively, the pod service might not need to be external facing", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # At least one rce vulnerability\n filter_rce_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n\t\t\"reviewPaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_rce_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.categories.isRce == true\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", - "resourceEnumerator": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ; x.apiVersion == \"v1\"]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ; x.apiVersion == \"v1\"]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"] # TODO: x.apiVersion == \"--input--\" || x.apiVersion == \"--input--\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on 
the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}" - }, - { - "name": "restrict-access-to-the-control-plane-endpoint", - "attributes": { - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n" - }, - { - "name": "pod-security-admission-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-default-service-accounts-has-only-default-roles", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"deletePaths\": [sprintf(\"subjects[%d]\", [i])],\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "etcd-unique-ca", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := split(command, \"=\")\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n" - 
}, - { - "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n" - }, - { - "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", - "attributes": { - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "ruleQuery": "armo_builtins", - "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private 
endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n" - }, - { - "name": "pod-security-admission-restricted-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package 
armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess is enabled on a private node for EKS. 
A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" - }, - { - "name": "resources-cpu-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU limits are not set.", - "remediation": "Ensure CPU limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", 
[format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" - }, - { - "name": "verify-image-signature", - "attributes": { - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "ruleQuery": "armo_builtins", - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.trustedCosignPublicKeys", - "name": "Trusted Cosign public keys", - "description": "A list of trusted Cosign public keys that are used for validating container image signatures." - } - ], - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.containers[%v].image\", [i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": 
[path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - }, - { - "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "pod-security-admission-restricted-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "workload-with-administrative-roles", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has administrative roles\n role := input[_]\n role.kind in [\"Role\", \"ClusterRole\"]\n is_administrative_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has administrative roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n \"object\": rolebinding,\n\t\t \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n 
wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\nis_administrative_role(role){\n administrative_resources := [\"*\"]\n administrative_verbs := [\"*\"]\n administrative_api_groups := [\"\", \"*\"]\n \n administrative_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in administrative_resources ; \n rule.verbs[b] in administrative_verbs ; \n rule.apiGroups[c] in administrative_api_groups]\n count(administrative_rule) > 0\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" - }, - { - "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - 
"description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": 
\"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" - }, - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := 
input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Use individual service account credentials for each controller.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. 
When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, 
false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "ensure-clusters-are-created-with-private-nodes", - "attributes": { - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. 
Private Nodes are nodes with no public IP addresses.", - "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n" - }, - { - "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" - }, - { - "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n" - }, - { - "name": "etcd-auto-tls-disabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Do not use self-signed certificates for TLS.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. 
Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "set-sysctls-params", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.sysctls is not set.", - "remediation": "Set securityContext.sysctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not disable the secure port.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable kubelet server certificate rotation on controller-manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` 
is set to \"true\" this recommendation verifies that it has not been disabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := 
yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "CVE-2022-0185", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "LinuxKernelVariables" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n\n parsed_kernel_version_arr := parse_kernel_version_to_array(node.status.nodeInfo.kernelVersion)\n is_azure := parsed_kernel_version_arr[4] == \"azure\"\n\n is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure)\n\n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n 
\tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n \"reviewPaths\": [\"kernelVersion\"],\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\n# General Kernel versions are between 5.1.1 and 5.16.2\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == false\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 16\n parsed_kernel_version_arr[2] < 2\n}\n\n# Azure kernel version with is 5.4.0-1067-azure\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == true\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 4\n parsed_kernel_version_arr[2] == 0\n parsed_kernel_version_arr[3] < 1067\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}\n\nparse_kernel_version_to_array(kernel_version_str) = output {\n\tversion_triplet := regex.find_n(`(\\d+\\.\\d+\\.\\d+)`, kernel_version_str,-1)\n version_triplet_array := split(version_triplet[0],\".\")\n\n build_vendor := regex.find_n(`-(\\d+)-(\\w+)`, kernel_version_str,-1)\n build_vendor_array := split(build_vendor[0],\"-\")\n\n output := [to_number(version_triplet_array[0]),to_number(version_triplet_array[1]),to_number(version_triplet_array[2]),to_number(build_vendor_array[1]),build_vendor_array[2]]\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" - }, - { - "name": "system-authenticated-allowed-to-take-over-cluster", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", - "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is gourp\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := 
subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}" - }, - { - "name": "pod-security-admission-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "list-all-namespaces", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - } - ], - "ruleDependencies": [], - "description": "lists all namespaces for users to review", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "exposed-sensitive-interfaces-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.sensitiveInterfaces" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveInterfaces", - "name": "Sensitive interfaces", - "description": "List of known software interfaces that should not generally be exposed to the Internet." 
- } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# 
====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" - }, - { - "name": "rbac-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - 
"apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot 
\"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # 
check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "persistentvolumeclaim-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PersistentVolumeClaim" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "rule-hostile-multitenant-workloads", - "attributes": { - "actionRequired": "manual review" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "ruleDependencies": [], - "configInputs": [], - "controlConfigInputs": [], - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", - "remediation": "Use physically isolated clusters", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
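The file-permission rules above compare the collected mode against 384 (octal 600) through cautils.unix_permissions_allow. The snippet below is a hypothetical stand-in for that helper, shown only to make the bitmask comparison concrete; the real implementation lives in the cautils package and may differ.

```
package file_permissions_example

# Hypothetical stand-in for cautils.unix_permissions_allow: a mode is acceptable
# when it sets no permission bits outside the allowed mask (384 == 0o600).
unix_permissions_allow(allowed, actual) {
	bits.and(actual, allowed) == actual
}

test_0600_is_allowed {
	unix_permissions_allow(384, 384)	# 0o600
}

test_0640_is_too_permissive {
	not unix_permissions_allow(384, 416)	# 0o640 adds group read
}
```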
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not always authorize all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "pv-without-encryption", - "attributes": { - "useFromKubescapeVersion": "v3.0.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PersistentVolume" - ] - }, - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "StorageClass" - ] - } - ], - "description": "PersistentVolume without encryption", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n\tpv := input[_]\n\tpv.kind == \"PersistentVolume\"\n\n\t# Find the related storage class\n\tstorageclass := input[_]\n\tstorageclass.kind == \"StorageClass\"\n\tpv.spec.storageClassName == storageclass.metadata.name\n\n\t# Check if storage class is encrypted\n\tnot is_storage_class_encrypted(storageclass)\n\n\tmsga := 
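The AlwaysAllow rule above extracts the `--authorization-mode` flag with a regex and splits its value on commas before rebuilding the command. A self-contained sketch of that parser follows; the sample command line in the test is made up.

```
package apiserver_flag_example

# Illustrative copy of the flag parser used by the AlwaysAllow rule above.
get_flag_values(cmd) = {"origin": origin, "values": values} {
	re := " ?--authorization-mode=(.+?)(?: |$)"
	matchs := regex.find_all_string_submatch_n(re, cmd, -1)
	count(matchs) == 1
	values := [val | val := split(matchs[0][1], ",")[_]; val != ""]
	origin := matchs[0][0]
}

test_mode_list_is_split_on_commas {
	flag := get_flag_values("kube-apiserver --authorization-mode=Node,RBAC,AlwaysAllow --v=2")
	flag.values == ["Node", "RBAC", "AlwaysAllow"]
}
```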
{\n\t\t\"alertMessage\": sprintf(\"Volume '%v' has is using a storage class that does not use encryption\", [pv.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": \"pv.spec.storageClassName\",\n\t\t\t\"value\": \"\"\n }],\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pv]}\n\t}\n}\n\n# Storage class is encrypted - AWS\nis_storage_class_encrypted(storageclass) {\n\tstorageclass.parameters.encrypted == \"true\"\n}\n\n# Storage class is encrypted - Azure\nis_storage_class_encrypted(storageclass) {\n\tstorageclass.provisioner\n\tcontains(storageclass.provisioner,\"azure\")\n}\n\n# Storage class is encrypted - GCP\nis_storage_class_encrypted(storageclass) {\n\t# GKE encryption is enabled by default https://cloud.google.com/blog/products/containers-kubernetes/exploring-container-security-use-your-own-keys-to-protect-your-data-on-gke\n\tstorageclass.provisioner\n\tcontains(storageclass.provisioner,\"csi.storage.gke.io\")\n}\n\n" - }, - { - "name": "k8s-common-labels-usage", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.k8sRecommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.k8sRecommendedLabels", - "name": "Kubernetes Recommended Labels", - "description": "Kubescape checks that workloads have at least one of this list of configurable labels, as recommended in the Kubernetes documentation." 
- } - ], - "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": 
\"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" - }, - { - "name": "rolebinding-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "csistoragecapacity-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "list-all-mutating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Validate admission controller" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - 
"admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns mutating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" - }, - { - "name": "workload-mounted-pvc", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts PVC", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) 
:= result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Do not allow all requests. Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous 
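workload-mounted-pvc addresses containers and volumes through kind-specific path arrays and object.get, so one mount check covers Pods, template-based workloads and CronJobs. A reduced sketch of that lookup pattern is below; only the Pod and Deployment paths are shown, and the sample object is invented.

```
package mounted_pvc_example

# Sketch of the object.get path-array lookup used by workload-mounted-pvc.
get_containers_path(resource) = ["spec", "containers"] { resource.kind == "Pod" }
get_containers_path(resource) = ["spec", "template", "spec", "containers"] { resource.kind == "Deployment" }

test_path_lookup_returns_containers {
	deployment := {"kind": "Deployment", "spec": {"template": {"spec": {"containers": [{"name": "web"}]}}}}
	containers := object.get(deployment, get_containers_path(deployment), [])
	count(containers) == 1
}
```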
requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "serviceaccount-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-network-policy-is-enabled-eks", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" - }, - { - "name": "psp-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "resources-memory-limit-and-request", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.memory_request_max", - "settings.postureControlInputs.memory_request_min", - "settings.postureControlInputs.memory_limit_max", - "settings.postureControlInputs.memory_limit_min" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.memory_request_max", - "name": "memory_request_max", - "description": "Ensure a memory resource request is set and is under this defined maximum value." - }, - { - "path": "settings.postureControlInputs.memory_request_min", - "name": "memory_request_min", - "description": "Ensure a memory resource request is set and is above this defined minimum value." - }, - { - "path": "settings.postureControlInputs.memory_limit_max", - "name": "memory_limit_max", - "description": "Ensure a memory resource limit is set and is under this defined maximum value." - }, - { - "path": "settings.postureControlInputs.memory_limit_min", - "name": "memory_limit_min", - "description": "Ensure a memory resource limit is set and is under this defined maximum value." 
- } - ], - "description": "memory limits and requests are not set.", - "remediation": "Ensure memory limits and requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# ================================== no memory requests ==================================\n# Fails if pod does not have container with memory requests\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot 
container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n\n# ============================================= memory requests exceed min/max =============================================\n\n# Fails if pod exceeds memory request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := \"resources.requests.memory\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := \"resources.requests.memory\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := \"resources.requests.memory\" \n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# ============================================= memory limits exceed min/max =============================================\n\n# Fails if pod exceeds memory-limit \ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit \", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit \ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n######################################################################################################\n\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max := data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := 
data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tto_number(given) < to_number(min)\n\n}\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" - }, - { - "name": "psp-deny-allowed-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] 
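resources-memory-limit-and-request only compares a configured bound with a container value when both carry the same unit suffix, as the compare_max/compare_min helpers above show. A reduced sketch for the Mi case follows; names and values are illustrative.

```
package memory_units_example

# Suffix-aware comparison in the style of compare_max: only values that share the
# "Mi" suffix are compared numerically.
exceeds_max(max_value, given) {
	endswith(max_value, "Mi")
	endswith(given, "Mi")
	to_number(split(given, "Mi")[0]) > to_number(split(max_value, "Mi")[0])
}

test_600Mi_exceeds_512Mi {
	exceeds_max("512Mi", "600Mi")
}

test_256Mi_is_within_512Mi {
	not exceeds_max("512Mi", "256Mi")
}
```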
- } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - }, - { - "name": "configured-readiness-probe", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Readiness probe is not configured", - "remediation": "Ensure Readiness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": 
[fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "alert-container-optimized-os-not-in-use", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". \n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. \n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "lease-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that 
allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, 
format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "resources-memory-requests", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory requests are not set.", - "remediation": "Ensure memory requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory requests ==================================\n# Fails if pod does not have container with memory requests\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "remediation": "Follow the 
Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Always pull images.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n 
\n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "remediation": "Edit the Controller Manager pod 
specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy default, `--root-ca-file` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, 
pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - }, - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := 
rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verify kubelet's certificate before establishing connection.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ingress-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "serviceaccount-token-mount", - 
"attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, start_of_path, [])\n\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata , wl.metadata)\n has_service_account_binding(sa)\n result := is_sa_auto_mounted_and_bound(spec, start_of_path, sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"reviewPaths\": failed_path,\n \"failedPaths\": failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n paths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == 
metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" - }, - { - "name": "rule-deny-cronjobs", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob" - }, - "ruleLanguage": "rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if it's cronjob", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# alert cronjobs\n\n# handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" - }, - { - "name": "validate-kubelet-tls-configuration-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - 
"match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", - "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t# get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = 
result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "pod-security-admission-baseline-applied-2", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant 
labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [], - "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" - }, - { - "name": "ensure_network_policy_configured_in_labels", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "description": "fails if no networkpolicy configured in workload labels", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# 
connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "ensure-image-scanning-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "DescribeRepositories" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}" - }, - { - "name": "automount-default-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", 
- "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - }, - { - "name": "ingress-and-egress-blocked", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, 
networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga 
:= {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" - }, - { - "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", - "attributes": { - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - 
} - ], - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"spec.tls\"],\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n 
\t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n" - }, - { - "name": "rule-can-delete-k8s-events-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "endpointslice-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' 
namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "insecure-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." - } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - }, - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "list-all-validating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Validate admission controller" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns validating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": 
"armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" - }, - { - "name": "etcd-client-auth-cert", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Enable client authentication on etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": 
"Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "configured-liveness-probe", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Liveness probe is not configured", - "remediation": "Ensure Liveness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - 
"Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Encrypt etcd key-value store.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", - "attributes": { - 
"hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "rule-access-dashboard-wl-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
[],\n\t\t\"deletePaths\": [\"spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - }, - { - "name": "label-usage-for-resources", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.recommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.recommendedLabels", - "name": "Recommended Labels", - "description": "Kubescape checks that workloads have at least one label that identifies semantic attributes." - } - ], - "description": "check if a certain set of labels is defined, this is a configurable control. 
Initial list: app, tier, phase, version, owner, env.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# 
get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n" - }, - { - "name": "rule-can-portforward-v1", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "naked-pods", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", - "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. 
Example command: kubectl create deployment nginx-depl --image=nginx:1.19", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" - }, - { - "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" - }, - { - "name": "psp-deny-hostpid", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set 
to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "psp-deny-allowprivilegeescalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := 
input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "CVE-2022-39328", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := 
to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - }, - { - "name": "rbac-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Activate garbage collector on pod termination, as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = 
{\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) 
{\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "access-container-service-account-v1", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - 
"description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" - }, - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": 
[result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" - }, - { - "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", - "attributes": { - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "read-only-port-enabled-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": 
obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - }, - 
{ - "name": "has-image-signature", - "attributes": { - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensures that all images contain some signature", - "remediation": "Replace the image with a signed image", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - }, - { - "name": "audit-policy-content", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n# rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit 
levels\n\tvalid_rules := [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}" - }, - { - "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "pod-security-admission-baseline-applied-1", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "insecure-port-flag", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - 
"rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - }, - { - "name": "rule-identify-old-k8s-registry", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Identifying if pod container images are from deprecated K8s registry", - "remediation": "Use images new registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": 
[path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\n" - }, - { - "name": "ensure-endpointprivateaccess-is-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" - }, - { - "name": "ingress-no-tls", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "Ingress should not be configured without TLS", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\n\t# Check if ingress has TLS enabled\n\tnot ingress.spec.tls\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Ingress '%v' has not TLS definition\", [ingress.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n \"path\": \"spec.tls\",\n \"value\": \"\"\n }],\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"k8sApiObjects\": [ingress]}\n\t}\n}\n" - }, - { - "name": "resource-policies", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - 
"resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace has no resource policies defined", - "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tstart_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tstart_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot 
container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" - }, - { - "name": "excessive_amount_of_vulnerabilities_pods", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed critical vulnerable pods", - "useFromKubescapeVersion": "v1.0.133", - "imageScanRelated": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "configInputs": [ - "settings.postureControlInputs.max_critical_vulnerabilities", - "settings.postureControlInputs.max_high_vulnerabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.max_critical_vulnerabilities", - "name": "Max Critical vulnerabilities", - "description": "The maximum number of Critical severity vulnerabilities permitted." - }, - { - "path": "settings.postureControlInputs.max_high_vulnerabilities", - "name": "Max High vulnerabilities", - "description": "The maximum number of High severity vulnerabilities permitted." 
- } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # Has ^ amount of vulnerabilities\n check_num_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \"reviewPaths\": [path],\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"Critical\" ])\n\n str_max := data.postureControlInputs.max_critical_vulnerabilities[_]\n exists > to_number(str_max)\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"High\" ])\n\n str_max := data.postureControlInputs.max_high_vulnerabilities[_]\n exists > to_number(str_max)\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n \n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}" - }, - { - "name": "kubelet-rotate-certificates", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --rotate-certificates argument is not set to false.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"rotateCertificates\"],\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "rule-can-update-configmap-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := 
rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", - 
"attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Turn on Role Based Access Control.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", 
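The `--authorization-mode` handling above rewrites the existing flag rather than appending a second one. A sketch of that behaviour, using an assumed `Node`-only value and made-up Pod metadata:

```
package armo_builtins

# Illustrative check: "--authorization-mode=Node" is rewritten to
# "--authorization-mode=Node,RBAC" by the fix path built in invalid_flag above.
test_authorization_mode_without_rbac_is_extended {
	results := deny with input as [{
		"apiVersion": "v1",
		"kind": "Pod",
		"metadata": {"name": "kube-apiserver", "namespace": "kube-system"},
		"spec": {"containers": [{
			"name": "kube-apiserver",
			"command": ["kube-apiserver", "--authorization-mode=Node"]
		}]}
	}]
	fix_values := {p.value | p := results[_].fixPaths[_]}
	fix_values["--authorization-mode=Node,RBAC"]
}
```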
[rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "resources-secret-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Secret" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "kubelet-ip-tables", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "rule": 
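resources-secret-in-default-namespace above distinguishes an explicit `default` namespace (reported as a failed path) from a missing namespace field (reported as a fix path). A sketch of the second case, with an assumed Secret name:

```
package armo_builtins

# Illustrative check: a Secret with no namespace set gets a fixPath proposing
# metadata.namespace = "YOUR_NAMESPACE" instead of a failed path.
test_secret_without_namespace_gets_fix_path {
	results := deny with input as [{
		"apiVersion": "v1",
		"kind": "Secret",
		"metadata": {"name": "db-credentials"},
		"type": "Opaque"
	}]
	fix_paths := {p.path | p := results[_].fixPaths[_]}
	fix_paths["metadata.namespace"]
}
```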
"package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "etcd-tls-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Configure TLS encryption for the etcd service.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default Value\nBy default, TLS encryption is not set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": 
result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Retain 10 or an appropriate number of old log files.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := 
json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", - "attributes": { - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "etcd-peer-auto-tls-disabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. 
Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "containers-mounting-docker-socket", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", 
[format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
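containers-mounting-docker-socket above keys purely on `hostPath.path` values, so an assumed Pod that mounts the Docker socket (pod name and image are placeholders) is enough to trigger it:

```
package armo_builtins

# Illustrative check: a hostPath volume pointing at /var/run/docker.sock is denied,
# and spec.volumes[0].hostPath.path is reported as the path to delete.
test_pod_mounts_docker_socket {
	results := deny with input as [{
		"apiVersion": "v1",
		"kind": "Pod",
		"metadata": {"name": "docker-client"},
		"spec": {
			"containers": [{"name": "cli", "image": "docker:cli"}],
			"volumes": [{"name": "docker-sock", "hostPath": {"path": "/var/run/docker.sock"}}]
		}
	}]
	count(results) > 0
}
```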
- } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" 
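rule-credentials-configmap above is driven entirely by the posture control inputs, so an isolated check has to override `data.postureControlInputs` as well as `input`. The key list, ConfigMap name, and values below are made-up illustration data:

```
package armo_builtins

# Illustrative check: a ConfigMap key containing "password" is flagged when
# "password" is configured as a sensitive key name and no allow-lists match.
test_configmap_with_password_like_key {
	cm := {
		"apiVersion": "v1",
		"kind": "ConfigMap",
		"metadata": {"name": "app-config"},
		"data": {"DB_PASSWORD": "hunter2"}
	}
	controls := {
		"sensitiveKeyNames": ["password"],
		"sensitiveValues": [],
		"sensitiveValuesAllowed": [],
		"sensitiveKeyNamesAllowed": []
	}
	results := deny with input as [cm] with data.postureControlInputs as controls
	count(results) > 0
}
```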
- ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "etcd-peer-client-auth-cert", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "etcd should be configured for peer authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "resources-cpu-requests", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU requests are not set.", - "remediation": "Ensure CPU requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ==================================== no CPU requests =============================================\n# Fails if pod does not have container with CPU request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU requests\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
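For etcd-peer-client-auth-cert above, the missing-flag branch appends `--peer-client-cert-auth=true` at the end of the command. A sketch with an assumed minimal etcd static Pod (name and client-URL flag are placeholders):

```
package armo_builtins

# Illustrative check: an etcd command line without --peer-client-cert-auth gets a
# fixPath whose value is --peer-client-cert-auth=true.
test_etcd_peer_client_cert_auth_missing {
	results := deny with input as [{
		"apiVersion": "v1",
		"kind": "Pod",
		"metadata": {"name": "etcd", "namespace": "kube-system"},
		"spec": {"containers": [{
			"name": "etcd",
			"command": ["etcd", "--advertise-client-urls=https://127.0.0.1:2379"]
		}]}
	}]
	fix_values := {p.value | p := results[_].fixPaths[_]}
	fix_values["--peer-client-cert-auth=true"]
}
```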
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU requests\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "anonymous-access-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", - "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n" - }, - { - "name": "cluster-admin-role", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal 
ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "ensure-service-principle-has-read-only-permissions", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - }, - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" - }, - { - "name": "workload-mounted-configmap", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts ConfigMaps", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, 
resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Automate service accounts management.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Validate service account before validating token.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == 
\"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-can-bind-escalate", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can or bind escalate roles/clusterroles", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": 
finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n 
no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := 
input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments are defined as privileged", - "remediation": "avoid defining pods as privileged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := 
input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "poddisruptionbudget-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - } - ], - "ruleDependencies": [], - "description": "", - 
"remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "alert-fargate-not-in-use", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in cluster.\n# a Node is identified as using fargate if it's name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if count of all nodes equals to count of nodes_not_fargate it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}" - }, - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t 
\"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", 
[start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "set-seccomp-profile-RuntimeDefault", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile as RuntimeDefault", - "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", 
\"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != \"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, 
\"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n" - }, - { - "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "endpoints-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Endpoints" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, 
resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "CVE-2022-24348", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - }, - { - "name": "psp-deny-hostnetwork", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input 
{\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "role-in-default-namespace", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not use token based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server TLS is not configured\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "set-fsgroup-value", - "attributes": {}, - "ruleLanguage": 
"Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" - }, - { - "name": "rule-can-create-pod", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - 
"description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native-cis", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "list-role-definitions-in-acr", - "attributes": {}, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" - } -] \ No newline at end of file diff --git a/releaseDev/security.json b/releaseDev/security.json deleted file mode 100644 index 66a74c4a5..000000000 --- a/releaseDev/security.json +++ /dev/null @@ -1,3407 +0,0 @@ -{ - "name": "security", - "description": "Controls that are used to assess security threats.", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "security" - ], - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "version": null, - "controls": [ - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. 
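A hedged sketch of the C-0005 remediation applied to a static kube-apiserver Pod manifest; the image tag and the presence of the flag at all are assumptions (newer API server versions drop the flag entirely). The point is only that the insecure-port-flag rule fails when the command contains --insecure-port=1:

apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
    - name: kube-apiserver
      image: registry.k8s.io/kube-apiserver:v1.27.0   # placeholder tag
      command:
        - kube-apiserver
        - --insecure-port=0    # zero, per the control's remediation text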
It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "insecure-port-flag", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. 
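A minimal sketch of the C-0012 remediation: reference a Secret instead of embedding the credential, since rule-credentials-in-env-var skips env entries that use valueFrom.secretKeyRef or configMapKeyRef. The pod, image, Secret name, and key are illustrative assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: app-demo                          # illustrative name
spec:
  containers:
    - name: app
      image: registry.example/app:latest  # placeholder image
      env:
        # a literal value on a sensitive key name would be flagged by the rule
        - name: DB_PASSWORD
          valueFrom:
            secretKeyRef:                 # is_not_reference passes over this entry
              name: db-credentials        # illustrative Secret name
              key: password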
Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - } - ] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = 
[]\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", 
[container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - } - ] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. 
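A sketch of the C-0034 remediation at both levels the automount-service-account rule checks; the ServiceAccount and pod names are illustrative assumptions, and per the control text the pod-level field takes precedence:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                            # illustrative name
automountServiceAccountToken: false       # the SA-level fix path the rule suggests
---
apiVersion: v1
kind: Pod
metadata:
  name: app-demo                          # illustrative name
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false     # the pod-level fix path the rule suggests
  containers:
    - name: app
      image: registry.example/app:latest  # placeholder image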
Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, 
metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := 
array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. 
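A sketch of a pod spec that the host-pid-ipc-privileges rule accepts: hostPID and hostIPC either omitted (false is the default) or set to false explicitly. The name and image are illustrative assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: isolated-demo                     # illustrative name
spec:
  hostPID: false                          # the rule only fails when this is true
  hostIPC: false                          # likewise
  containers:
    - name: app
      image: registry.example/app:latest  # placeholder image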
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to 
false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": 
[path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := 
wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
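Purely as an illustration of keeping a hostPath mount read-only (the fix this control's rule suggests via the readOnly field), a hedged sketch with placeholder names and paths could look like the following:

apiVersion: v1
kind: Pod
metadata:
  name: hostpath-readonly-demo          # illustrative name
spec:
  containers:
  - name: app
    image: registry.example/app:latest  # placeholder image
    volumeMounts:
    - name: host-logs
      mountPath: /host-logs
      readOnly: true                    # read-only mount is not flagged by this control
  volumes:
  - name: host-logs
    hostPath:
      path: /var/log
      type: Directory
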
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := 
container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n}" - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." 
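As a hedged sketch of a container that would pass this check, dropping all Linux capabilities and adding none of the configured insecure ones (names and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: minimal-capabilities-demo       # illustrative name
spec:
  containers:
  - name: app
    image: registry.example/app:latest  # placeholder image
    securityContext:
      capabilities:
        drop: ["ALL"]                   # nothing from the insecureCapabilities list is added
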
- } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
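Where host access is not genuinely needed, an ephemeral emptyDir volume is a common substitute that this control would not flag; the manifest below is only an illustrative sketch, separate from the control's bundled example:

apiVersion: v1
kind: Pod
metadata:
  name: no-hostpath-demo                # illustrative name
spec:
  containers:
  - name: test-container
    image: registry.example/app:latest  # placeholder image
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
  volumes:
  - name: test-volume
    emptyDir: {}                        # ephemeral volume instead of hostPath
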
This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = 
sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. 
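In the kubelet configuration file inspected by the rule below, the equivalent of the --anonymous-auth=false flag is the authentication.anonymous.enabled field; an illustrative fragment:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false        # rejects anonymous requests to the kubelet
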
If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": 
obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Container runtime socket mounted", - "attributes": { - "controlTypeTags": [ - "devops", - "smartRemediation" - ] - }, - "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", - "remediation": "Remove container runtime socket mount request or define an exception.", - "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", - "test": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "containers-mounting-docker-socket", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Check hostpath. 
If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n" - } - ] - }, - { - "name": "Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
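The rules bundled with this control look at privileged mode, a writable root filesystem, root users/groups and the NET_RAW capability; a pod that sets the corresponding securityContext fields explicitly, sketched here with placeholder names, would satisfy them:

apiVersion: v1
kind: Pod
metadata:
  name: security-context-demo           # illustrative name
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
  containers:
  - name: app
    image: registry.example/app:latest  # placeholder image
    securityContext:
      privileged: false
      readOnlyRootFilesystem: true
      capabilities:
        drop: ["NET_RAW"]
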
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", 
[wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": 
sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - }, - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := 
evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": 
failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - 
], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-seccomp-profile", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", 
[concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-procmount-default", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := 
input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n" - }, - { - "name": "set-fsgroup-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has 
fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "set-sysctls-params", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.sysctls is not set.", - "remediation": "Set securityContext.sysctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tfixPaths := [{\"path\": sprintf(\"%s.name\", [path]), \"value\": 
\"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"%s.value\", [path]), \"value\": \"YOUR_VALUE\"}]\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n\tnot pod.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n\tnot wl.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n\tnot cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tfixPaths = [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\", \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n" - }, - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - 
"resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} 
\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Workload with secret access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Secret Access" - ] - } - ] - }, - "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. 
Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", - "controlID": "C-0255", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "workload-mounted-secrets", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Secret" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == 
\"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Exposure to Internet", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "service-destruction", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-database-without-authentication", - "categories": [ - "Initial Access" - ] - } - ] - }, - "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. It fails in case it find workloads connected with these resources.", - "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", - "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", - "controlID": "C-0256", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "exposure-to-internet", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n\t\t \"reviewPaths\": failPath,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n\n # Make sure that they belong to the same namespace\n svc.metadata.namespace == 
ingress.metadata.namespace\n\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t\t{\n\t \"object\": ingress,\n\t\t \"reviewPaths\": result,\n\t \"failedPaths\": result,\n\t },\n\t\t{\n\t \"object\": svc,\n\t\t}\n ]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" - } - ] - }, - { - "name": "Workload with PVC access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Collection" - ] - } - ] - }, - "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. 
Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", - "controlID": "C-0257", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "rules": [ - { - "name": "workload-mounted-pvc", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts PVC", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# 
get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Workload with configMap access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Collection" - ] - } - ] - }, - "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", - "controlID": "C-0258", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "workload-mounted-configmap", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts ConfigMaps", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := 
result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Workload with credential access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "This control checks if workloads specifications have sensitive information in their environment variables.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", - "controlID": "C-0259", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - 
"description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value 
!= \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := 
[sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "Missing network policy", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure_network_policy_configured_in_labels", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "description": "fails if no networkpolicy configured in workload labels", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the 
networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "ServiceAccount token mounted", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", - "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. 
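For illustration only, a minimal sketch of the workload-level remediation described above, assuming a hypothetical Deployment name and image: setting automountServiceAccountToken: false in the Pod template spec so the ServiceAccount token is not mounted unless the workload explicitly needs it.

```yaml
# Hypothetical example: opt the workload out of automatic token mounting.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app                      # hypothetical name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      automountServiceAccountToken: false   # the fix this control suggests
      containers:
      - name: app
        image: registry.example.com/app:1.0 # placeholder image
```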
Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", - "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", - "controlID": "C-0261", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "serviceaccount-token-mount", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, start_of_path, [])\n\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata , wl.metadata)\n has_service_account_binding(sa)\n result := is_sa_auto_mounted_and_bound(spec, start_of_path, sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"reviewPaths\": failed_path,\n \"failedPaths\": failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n paths[0] != \"\"\n} else 
= []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" - } - ] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. 
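As a hedged illustration of the kind of binding this control flags (all names here are hypothetical, not taken from the source), a ClusterRoleBinding whose subject is the system:unauthenticated group grants RBAC permissions to anonymous users; removing that subject is the remediation.

```yaml
# Hypothetical binding that would be flagged: it grants a role to
# unauthenticated users via the system:unauthenticated group.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: anonymous-read                   # hypothetical name
subjects:
- kind: Group
  name: system:unauthenticated           # subject the rule matches and reports
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: view                             # any bound role triggers the control
  apiGroup: rbac.authorization.k8s.io
```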
Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "anonymous-access-enabled", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", - "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n" - } - ] - }, - { - "controlID": "C-0265", - "name": "Authenticated user has sensitive permissions", - "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
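For context, a minimal hypothetical sketch of the misconfiguration this control targets: the system:authenticated group (every authenticated identity, including service accounts) bound to a role that can read secrets. Resource names below are placeholders, not values from the source.

```yaml
# Hypothetical role granting read access to secrets.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: secret-reader                    # hypothetical name
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "watch"]
---
# Binding it to system:authenticated is what the rule reports.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: authenticated-secret-reader      # hypothetical name
subjects:
- kind: Group
  name: system:authenticated             # subject the rule matches on
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: secret-reader
  apiGroup: rbac.authorization.k8s.io
```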
This control ensures that system:authenticated users do not have cluster risking permissions.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "system-authenticated-allowed-to-take-over-cluster", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", - "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is gourp\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}" - } - ] - }, - { - "name": "Workload with cluster takeover roles", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Cluster Access" - ], - "displayRelatedResources": true, - 
"clickableResourceKind": "ServiceAccount" - } - ] - }, - "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", - "remediation": "You should apply least privilege principle. Make sure each service account has only the permissions that are absolutely necessary.", - "long_description": "In Kubernetes, workloads with overly permissive roles pose a significant security risk. When a workload is granted roles that exceed the necessities of its operation, it creates an attack surface for privilege escalation within the cluster. This is especially critical if the roles include permissions for creating, updating, or accessing sensitive resources or secrets. An attacker exploiting such a workload can leverage these excessive privileges to perform unauthorized actions, potentially leading to a full cluster takeover. Ensuring that each service account associated with a workload is limited to permissions that are strictly necessary for its function is crucial in mitigating the risk of cluster takeovers.", - "test": "Check if the service account used by a workload has cluster takeover roles.", - "controlID": "C-0267", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "workload-with-cluster-takeover-roles", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has cluster takeover roles\n role := input[_]\n role.kind in [\"Role\", \"ClusterRole\"]\n is_takeover_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has cluster takeover roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n 
\"object\": rolebinding,\n\t\t \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\n# look for rule allowing create/update workloads\nis_takeover_role(role){\n takeover_resources := [\"pods\", \"*\"]\n takeover_verbs := [\"create\", \"update\", \"patch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}\n\n# look for rule allowing secret access\nis_takeover_role(role){\n rule := role.rules[i]\n takeover_resources := [\"secrets\", \"*\"]\n takeover_verbs := [\"get\", \"list\", \"watch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := 
[\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" - } - ] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-cpu-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU limits are not set.", - "remediation": "Ensure CPU limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := 
{\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory limits are not set.", - "remediation": "Ensure memory limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" - } - ] - }, - { - "name": "Workload with administrative roles", - "attributes": {}, - "description": "This control identifies workloads where the associated service accounts have roles that grant administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", - "long_description": "In Kubernetes environments, workloads granted administrative-level privileges without restrictions represent a critical security vulnerability. When a service account associated with a workload is configured with permissions to perform any action on any resource, it essentially holds unrestricted access within the cluster, akin to cluster admin privileges. This configuration dramatically increases the risk of security breaches, including data theft, unauthorized modifications, and potentially full cluster takeovers. Such privileges allow attackers to exploit the workload for wide-ranging malicious activities, bypassing the principle of least privilege. 
Therefore, it's essential to follow the least privilege principle and make sure cluster admin permissions are granted only when it is absolutely necessary.", - "test": "Check if the service account used by a workload has cluster admin roles, either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges.", - "controlID": "C-0272", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "workload-with-administrative-roles", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has administrative roles\n role := input[_]\n role.kind in [\"Role\", \"ClusterRole\"]\n is_administrative_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has administrative roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n \"object\": rolebinding,\n\t\t \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n 
wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\nis_administrative_role(role){\n administrative_resources := [\"*\"]\n administrative_verbs := [\"*\"]\n administrative_api_groups := [\"\", \"*\"]\n \n administrative_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in administrative_resources ; \n rule.verbs[b] in administrative_verbs ; \n rule.apiGroups[c] in administrative_api_groups]\n count(administrative_rule) > 0\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}" - } - ] - }, - { - "name": "Outdated Kubernetes version", - "attributes": {}, - "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", - "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", - "long_description": "Running an outdated version of Kubernetes poses significant security risks and operational challenges. Older versions may contain unpatched vulnerabilities, leading to potential security breaches and unauthorized access. Additionally, outdated clusters might not support newer, more secure, and efficient features, impacting both performance and security. 
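For illustration, a minimal sketch of the single Node field the rule below evaluates; the version value shown is illustrative, not real cluster data, and is compared against the supported-versions list by prefix.

```yaml
# Hypothetical Node fragment: the rule reads status.nodeInfo.kubeletVersion
# and flags the node if it does not start with any supported version prefix.
apiVersion: v1
kind: Node
metadata:
  name: worker-1                         # hypothetical node name
status:
  nodeInfo:
    kubeletVersion: v1.25.4              # illustrative outdated version
```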
Regularly updating Kubernetes ensures compliance with the latest security standards and access to enhanced functionalities.", - "test": "Verifies the current Kubernetes version against the latest stable releases.", - "controlID": "C-0273", - "baseScore": 2.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "outdated-k8s-version", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\tnode := input[_]\n\tnode.kind == \"Node\"\n\tcurrent_version := node.status.nodeInfo.kubeletVersion\n has_outdated_version(current_version)\n\tpath := \"status.nodeInfo.kubeletVersion\"\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Your kubelet version: %s, in node: %s is outdated\", [current_version, node.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [node]},\n\t}\n}\n\n\nhas_outdated_version(version) {\n\t# the `supported_k8s_versions` is validated in the validations script against \"https://api.github.com/repos/kubernetes/kubernetes/releases\"\n supported_k8s_versions := [\"v1.29\", \"v1.28\", \"v1.27\"] \n\tevery v in supported_k8s_versions{\n\t\tnot startswith(version, v)\n\t}\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0005", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0057", - "C-0066", - "C-0069", - "C-0070", - "C-0074", - "C-0211", - "C-0255", - "C-0256", - "C-0257", - "C-0258", - "C-0259", - "C-0260", - "C-0261", - "C-0262", - "C-0265", - "C-0267", - "C-0270", - "C-0271", - "C-0272", - "C-0273" - ] -} \ No newline at end of file diff --git a/releaseDev/security_frameworks.json b/releaseDev/security_frameworks.json deleted file mode 100644 index 639ce4869..000000000 --- a/releaseDev/security_frameworks.json +++ /dev/null @@ -1,2569 +0,0 @@ -[ - { - "name": "WorkloadScan", - "description": "Framework for scanning a workload", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "security" - ], - "version": null, - "controls": [ - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. 
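As a hedged illustration of how this control is typically exercised (registry names are placeholders, not values from the configuration), each container image's registry prefix is compared against the user-approved list:

```yaml
# Hypothetical Pod: one image from an approved registry, one from outside it.
apiVersion: v1
kind: Pod
metadata:
  name: registry-demo                    # hypothetical name
spec:
  containers:
  - name: approved
    image: registry.mycompany.example/team/app:1.2.0  # passes if this registry is approved
  - name: unapproved
    image: docker.io/someuser/app:latest               # fails if docker.io is not in the list
```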
User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0236", - "name": "Verify image signature", - "description": "Verifies the signature of each image with given public keys", - "long_description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "manual_test": "", - "references": [], - "attributes": { - "actionRequired": "configuration" - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0237", - "name": "Check if signature exists", - "description": "Ensures that all images contain some signature", - "long_description": "Verifies that each image is signed", - "remediation": "Replace the image with a signed image", - "manual_test": "", - "references": [], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
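For illustration, a minimal hypothetical Pod of the kind this control flags: a hostPath volume mounted without readOnly: true. Marking the mount read only, or removing it, resolves the finding.

```yaml
# Hypothetical Pod with a writable hostPath mount.
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo                    # hypothetical name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0  # placeholder image
    volumeMounts:
    - name: host-data
      mountPath: /data
      # readOnly is omitted, so the mount is writable and the rule raises an alert
  volumes:
  - name: host-data
    hostPath:
      path: /var/lib/app-data            # hypothetical host directory
      type: Directory
```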
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with PVC access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Collection" - ] - } - ] - }, - "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", - "controlID": "C-0257", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "rules": [] - }, - { - "name": "Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Missing network policy", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. 
Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0078", - "C-0236", - "C-0237", - "C-0045", - "C-0048", - "C-0257", - "C-0207", - "C-0034", - "C-0012", - "C-0041", - "C-0260", - "C-0044", - "C-0038", - "C-0046", - "C-0013", - "C-0016", - "C-0017", - "C-0055", - "C-0057", - "C-0270", - "C-0271" - ] - }, - { - "name": "security", - "description": "Controls that are used to assess security threats.", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "security" - ], - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "version": null, - "controls": [ - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. 
If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. 
This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. 
If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Container runtime socket mounted", - "attributes": { - "controlTypeTags": [ - "devops", - "smartRemediation" - ] - }, - "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", - "remediation": "Remove container runtime socket mount request or define an exception.", - "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies pods that attempt to mount Docker socket for accessing Docker runtime.", - "test": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with secret access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Secret Access" - ] - } - ] - }, - "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", - "controlID": "C-0255", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exposure to Internet", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "service-destruction", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-database-without-authentication", - "categories": [ - "Initial Access" - ] - } - ] - }, - "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. It fails in case it find workloads connected with these resources.", - "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", - "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", - "controlID": "C-0256", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workload with PVC access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Collection" - ] - } - ] - }, - "description": "This control detects workloads that have mounted PVC. 
Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", - "controlID": "C-0257", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "rules": [] - }, - { - "name": "Workload with configMap access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Collection" - ] - } - ] - }, - "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", - "controlID": "C-0258", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with credential access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "This control checks if workloads specifications have sensitive information in their environment variables.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", - "controlID": "C-0259", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Missing network policy", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "This control detects workloads that has no NetworkPolicy configured in labels. 
If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "ServiceAccount token mounted", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", - "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", - "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", - "controlID": "C-0261", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0265", - "name": "Authenticated user has sensitive permissions", - "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
This control ensures that system:authenticated users do not have cluster risking permissions.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workload with cluster takeover roles", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Cluster Access" - ], - "displayRelatedResources": true, - "clickableResourceKind": "ServiceAccount" - } - ] - }, - "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", - "remediation": "You should apply least privilege principle. Make sure each service account has only the permissions that are absolutely necessary.", - "long_description": "In Kubernetes, workloads with overly permissive roles pose a significant security risk. When a workload is granted roles that exceed the necessities of its operation, it creates an attack surface for privilege escalation within the cluster. This is especially critical if the roles include permissions for creating, updating, or accessing sensitive resources or secrets. An attacker exploiting such a workload can leverage these excessive privileges to perform unauthorized actions, potentially leading to a full cluster takeover. 
Ensuring that each service account associated with a workload is limited to permissions that are strictly necessary for its function is crucial in mitigating the risk of cluster takeovers.", - "test": "Check if the service account used by a workload has cluster takeover roles.", - "controlID": "C-0267", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with administrative roles", - "attributes": {}, - "description": "This control identifies workloads where the associated service accounts have roles that grant administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", - "long_description": "In Kubernetes environments, workloads granted administrative-level privileges without restrictions represent a critical security vulnerability. When a service account associated with a workload is configured with permissions to perform any action on any resource, it essentially holds unrestricted access within the cluster, akin to cluster admin privileges. This configuration dramatically increases the risk of security breaches, including data theft, unauthorized modifications, and potentially full cluster takeovers. Such privileges allow attackers to exploit the workload for wide-ranging malicious activities, bypassing the principle of least privilege. 
Therefore, it's essential to follow the least privilege principle and make sure cluster admin permissions are granted only when it is absolutely necessary.", - "test": "Check if the service account used by a workload has cluster admin roles, either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges.", - "controlID": "C-0272", - "baseScore": 6.0, - "category": { - "name": "Workload", - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Outdated Kubernetes version", - "attributes": {}, - "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", - "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", - "long_description": "Running an outdated version of Kubernetes poses significant security risks and operational challenges. Older versions may contain unpatched vulnerabilities, leading to potential security breaches and unauthorized access. Additionally, outdated clusters might not support newer, more secure, and efficient features, impacting both performance and security. Regularly updating Kubernetes ensures compliance with the latest security standards and access to enhanced functionalities.", - "test": "Verifies the current Kubernetes version against the latest stable releases.", - "controlID": "C-0273", - "baseScore": 2.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0005", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0057", - "C-0066", - "C-0069", - "C-0070", - "C-0074", - "C-0211", - "C-0255", - "C-0256", - "C-0257", - "C-0258", - "C-0259", - "C-0260", - "C-0261", - "C-0262", - "C-0265", - "C-0267", - "C-0270", - "C-0271", - "C-0272", - "C-0273" - ] - }, - { - "name": "ClusterScan", - "description": "Framework for scanning a cluster", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "security" - ], - "version": null, - "controls": [ - { - "name": "Secret/etcd encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "RBAC enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0265", - "name": "Authenticated user has sensitive permissions", - "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have cluster risking permissions.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to system:authenticated group.", - "attributes": {}, - "baseScore": 7, - "category": { - "name": "Control plane", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. 
", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Prevent containers from allowing command execution", - "attributes": { - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Roles with delete capabilities", - "attributes": { - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. 
It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Validate admission controller (validating)", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Validate admission controller (mutating)", - "attributes": { - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Administrative Roles", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" - ], - "attributes": {}, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" - ], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Missing network policy", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "This control detects workloads that has no NetworkPolicy configured in labels. 
If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exposure to internet", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "service-destruction", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Initial Access" - ] - }, - { - "attackTrack": "external-database-without-authentication", - "categories": [ - "Initial Access" - ] - } - ] - }, - "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. It fails in case it find workloads connected with these resources.", - "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", - "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", - "controlID": "C-0256", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0066", - "C-0088", - "C-0067", - "C-0005", - "C-0262", - "C-0265", - "C-0015", - "C-0002", - "C-0007", - "C-0063", - "C-0036", - "C-0039", - "C-0035", - "C-0188", - "C-0187", - "C-0012", - "C-0260", - "C-0256", - "C-0038", - "C-0041", - "C-0048", - "C-0057", - "C-0013" - ] - } -] \ No newline at end of file diff --git a/releaseDev/soc2.json b/releaseDev/soc2.json deleted file mode 100644 index 2ef630fe9..000000000 --- a/releaseDev/soc2.json +++ /dev/null @@ -1,537 +0,0 @@ -{ - "name": "SOC2", - "description": "SOC2 compliance related controls", - "attributes": { - "armoBuiltin": true - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Firewall (CC6.1,CC6.6,CC7.2)", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management.", - "remediation": "Define network policies for all workloads to protect unwanted access", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure_network_policy_configured_in_labels", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "description": "fails if no networkpolicy configured in workload labels", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# 
connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ], - "long_description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. Firewall rules are reviewed at least annually by IT management." - }, - { - "name": "Cryptographic key management - misplaced secrets (CC6.1,CC6.6,CC6.7)", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "Cryptographic key management - minimize access to secrets (CC6.1,CC6.6,CC6.7)", - "controlID": "C-0186", - "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", - "long_description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. 
Access to encryption keys are restricted to authorized personnel.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" - ], - "attributes": {}, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Access restriction to infrastructure - admin access (CC6.1 ,CC6.2, CC6.7, CC6.8)", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", - "controlID": "C-0035", - "baseScore": 6.0, - "category": { - "name": "Access control", - "id": "Cat-2" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Event logging (CC6.8,CC7.1,CC7.2)", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers.", - "remediation": "Turn on audit logging for your cluster. 
Look at the vendor guidelines for more details", - "long_description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers: - Logon attempts - Data deletions - Application and system errors - Changes to software and configuration settings - Changes to system files, configuration files or content files The logs are monitored by IT Operations staff and significant issues are investigated and resolved within a timely manner.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "category": { - "name": "Control plane", - "id": "Cat-1" - }, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Data in motion encryption - Ingress is TLS encrypted (CC6.1,CC6.6,CC6.7)", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", - "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", - "test": "Check if the Ingress resource has TLS configured", - "controlID": "C-0263", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ingress-no-tls", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "Ingress should not be configured without TLS", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\n\t# Check if ingress has TLS enabled\n\tnot ingress.spec.tls\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Ingress '%v' has not TLS definition\", [ingress.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n \"path\": \"spec.tls\",\n \"value\": \"\"\n }],\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"k8sApiObjects\": [ingress]}\n\t}\n}\n" - } - ], - "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." 
- } - ], - "ControlsIDs": [ - "C-0260", - "C-0012", - "C-0186", - "C-0035", - "C-0067", - "C-0263" - ] -} \ No newline at end of file diff --git a/releaseDev/workloadscan.json b/releaseDev/workloadscan.json deleted file mode 100644 index f59cb32e2..000000000 --- a/releaseDev/workloadscan.json +++ /dev/null @@ -1,2021 +0,0 @@ -{ - "name": "WorkloadScan", - "description": "Framework for scanning a workload", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "security" - ], - "version": null, - "controls": [ - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useUntilKubescapeVersion": "v2.3.8" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
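As a rough illustration of C-0078, the Deployment sketched below pulls its container image from a registry that is assumed, purely for the example, to appear in the configured imageRepositoryAllowList; `registry.example.com` is a hypothetical allowed registry, not a value defined by the control. An image from any registry outside that list (or a bare Docker Hub image) would be reported instead.

```yaml
# Hedged sketch for C-0078: the image prefix is assumed to be listed in
# settings.postureControlInputs.imageRepositoryAllowList.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
      - name: app
        image: registry.example.com/team/example-app:1.2.3   # matches the allowed registry prefix
```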
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - }, - { - "name": "container-image-repository-v1", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "useFromKubescapeVersion": "v2.9.0" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", 
- "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." - } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n" - } - ] - }, - { - "controlID": "C-0236", - "name": "Verify image signature", - "description": "Verifies the signature of each image with given public keys", - "long_description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "manual_test": "", - "references": [], - "attributes": { - "actionRequired": "configuration" - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "verify-image-signature", - "attributes": { - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - 
"batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "ruleQuery": "armo_builtins", - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.trustedCosignPublicKeys", - "name": "Trusted Cosign public keys", - "description": "A list of trusted Cosign public keys that are used for validating container image signatures." - } - ], - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.containers[%v].image\", [i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0237", - "name": "Check if signature exists", - "description": "Ensures that all images contain some signature", - "long_description": "Verifies that each image is signed", - "remediation": "Replace the image with a signed image", - "manual_test": "", - "references": [], - "attributes": {}, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "category": { - "name": "Workload", - "subCategory": { - "name": "Supply chain", - "id": "Cat-6" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "has-image-signature", - "attributes": { - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - 
"apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensures that all images contain some signature", - "remediation": "Replace the image with a signed image", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - } - ] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in Pod spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := 
container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, start_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"deletePaths\": failed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n}" - } - ] - }, - { - "name": "HostPath mount", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the pods using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [result],\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = 
sprintf(\"%vvolumes[%v].hostPath.path\", [start_of_path, format_int(i, 10)])\n}" - } - ] - }, - { - "name": "Workload with PVC access", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Collection" - ] - } - ] - }, - "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", - "controlID": "C-0257", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "category": { - "name": "Workload", - "subCategory": { - "name": "Storage", - "id": "Cat-8" - }, - "id": "Cat-5" - }, - "rules": [ - { - "name": "workload-mounted-pvc", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts PVC", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result 
{\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" - ], - "attributes": {}, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "category": { - "name": "Workload", - "subCategory": { - "name": "Secrets", - "id": "Cat-3" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-secrets-in-env-var", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from 
environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, 
metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "category": { - "name": "Secrets", - "id": "Cat-3" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." 
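To illustrate what the rule-credentials-in-env-var checks report, here is a hedged sketch assuming that "password" appears in the configured sensitiveKeyNames list; all names below are hypothetical placeholders. A literal value under an env name that matches a sensitive key name is flagged, while a value supplied by reference via secretKeyRef is skipped by the rule's is_not_reference helper.

```yaml
# Hedged example for C-0012, assuming "password" is among sensitiveKeyNames.
apiVersion: v1
kind: Pod
metadata:
  name: credentials-demo
spec:
  containers:
  - name: flagged
    image: registry.example.com/app:1.0
    env:
    - name: DB_PASSWORD              # env name contains a sensitive key name
      value: "plain-text-secret"     # literal value -> reported by the rule
  - name: not-flagged
    image: registry.example.com/app:1.0
    env:
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:                # reference, skipped by is_not_reference
          name: db-credentials
          key: password
```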
- } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val 
, value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed", - "settings.postureControlInputs.sensitiveKeyNamesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Sensitive Values", - "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "Allowed Values", - "description": "Reduce false positives with known values." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Sensitive Keys", - "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", - "name": "Allowed Keys", - "description": "Reduce false positives with known key names." - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", - "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those pods that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Network", - "id": "Cat-4" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "Missing network policy", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Lateral Movement (Network)" - ] - } - ] - }, - "description": "This control detects workloads that has no NetworkPolicy configured in labels. 
If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure_network_policy_configured_in_labels", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "description": "fails if no networkpolicy configured in workload labels", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# 
connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops" - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "category": { - "name": "Network", - "id": "Cat-4" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. 
The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Privilege Escalation (Node)" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). 
", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." - } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := 
data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify that runAsUser is set to a user id greater than 0 or that runAsNonRoot is set to true, and that runAsGroup is set to an id greater than 0. 
Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, 
run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n" - } - ] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-allow-privilege-escalation", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := 
{\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, start_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = 
[]\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "smartRemediation" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", 
[container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n" - } - ] - }, - { - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "linux-hardening", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = 
containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "smartRemediation" - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in Pod spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Node escape", - "id": "Cat-9" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": 
path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "Ensure CPU limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the CPU limits are not set.", - "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0270", - "baseScore": 8.0, - "category": { - "name": "Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - 
"name": "resources-cpu-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "CPU limits are not set.", - "remediation": "Ensure CPU limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Ensure memory limits are set", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops", - "security" - ], - "attackTracks": [ - { - "attackTrack": "service-destruction", - "categories": [ - "Denial of service" - ] - } - ] - }, - "description": "This control identifies all Pods for which the memory limits are not set.", - "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0271", - "baseScore": 8.0, - "category": { - "name": 
"Workload", - "subCategory": { - "name": "Resource management", - "id": "Cat-7" - }, - "id": "Cat-5" - }, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-limits", - "attributes": {}, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "memory limits are not set.", - "remediation": "Ensure memory limits are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0078", - "C-0236", - "C-0237", - "C-0045", - "C-0048", - "C-0257", - "C-0207", - "C-0034", - "C-0012", - "C-0041", - "C-0260", - "C-0044", - "C-0038", - "C-0046", - "C-0013", - "C-0016", - "C-0017", - "C-0055", - "C-0057", - "C-0270", - "C-0271" - ] -} \ No newline at end of file From 395f66567482c2f3a3f1abf3b9b11f332de0c782 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 12 
Mar 2024 11:47:27 +0200 Subject: [PATCH 135/195] upgrade opa-utils Signed-off-by: YiscahLevySilas1 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9526dcc00..c1f88515f 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/armosec/armoapi-go v0.0.330 github.com/go-gota/gota v0.12.0 - github.com/kubescape/opa-utils v0.0.279-0.20240306142553-f6c8e3e85e5b + github.com/kubescape/opa-utils v0.0.279 github.com/stretchr/testify v1.8.4 go.uber.org/zap v1.27.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b diff --git a/go.sum b/go.sum index b69e8831b..492724314 100644 --- a/go.sum +++ b/go.sum @@ -362,8 +362,8 @@ github.com/kubescape/go-logger v0.0.22 h1:gle7wH6emOiGv9ljdpVi82pWLQ3jGucrUucvil github.com/kubescape/go-logger v0.0.22/go.mod h1:x3HBpZo3cMT/WIdy18BxvVVd5D0e/PWFVk/HiwBNu3g= github.com/kubescape/k8s-interface v0.0.161 h1:v6b3/kmA4o/2niNrejrbXj5X9MLfH0UrpI3s+e/fdwc= github.com/kubescape/k8s-interface v0.0.161/go.mod h1:oF+Yxug3Kpfu9Yr2j63wy7gwswrKXpiqI0mLk/7gF/s= -github.com/kubescape/opa-utils v0.0.279-0.20240306142553-f6c8e3e85e5b h1:lY9f5LfjD6cy+remULO//ey2FK+mVEJSIxjatuu+6kI= -github.com/kubescape/opa-utils v0.0.279-0.20240306142553-f6c8e3e85e5b/go.mod h1:N/UnbZHpoiHQH7O50yadhIXZvVl0IVtTGBmePPrSQSg= +github.com/kubescape/opa-utils v0.0.279 h1:a+w9rAPVkNEKONVtswsVdRpw4LxwEdfkKsXvgzLAHhg= +github.com/kubescape/opa-utils v0.0.279/go.mod h1:N/UnbZHpoiHQH7O50yadhIXZvVl0IVtTGBmePPrSQSg= github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw= github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= From 6e720668f40bf0d733d9259693bd67d1b1a7b1b2 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 12 Mar 2024 16:42:31 +0200 Subject: [PATCH 136/195] bump go version Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release-without-st.yaml | 2 +- .github/workflows/create-release.yaml | 4 ++-- .github/workflows/pr-tests.yaml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/create-release-without-st.yaml b/.github/workflows/create-release-without-st.yaml index ef2f6330d..c8e1f65f1 100644 --- a/.github/workflows/create-release-without-st.yaml +++ b/.github/workflows/create-release-without-st.yaml @@ -36,7 +36,7 @@ jobs: - name: Set up Go uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 with: - go-version: '1.20' + go-version: '1.21' - name: setup python uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml index 413209019..6a3eca62f 100644 --- a/.github/workflows/create-release.yaml +++ b/.github/workflows/create-release.yaml @@ -22,7 +22,7 @@ jobs: pull-requests: write uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main with: - GO_VERSION: '1.20' + GO_VERSION: '1.21' BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... 
secrets: inherit @@ -51,7 +51,7 @@ jobs: - name: Set up Go uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 with: - go-version: '1.20' + go-version: '1.21' - name: Test Regoes working-directory: testrunner diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index b1b79309b..c517620bb 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -21,7 +21,7 @@ jobs: pull-requests: write uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main with: - GO_VERSION: '1.20' + GO_VERSION: '1.21' BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... secrets: inherit @@ -47,7 +47,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: '1.20' + go-version: '1.21' # testing rego library - name: Test Regoes From 283bc5b6cb8cac24d41943aaa0879a41f1fe56ee Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Wed, 13 Mar 2024 10:09:31 +0200 Subject: [PATCH 137/195] support v1 version as well Signed-off-by: David Wertenteil --- .../csistoragecapacity-in-default-namespace/rule.metadata.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/csistoragecapacity-in-default-namespace/rule.metadata.json b/rules/csistoragecapacity-in-default-namespace/rule.metadata.json index f23a1df7b..a991ad8cb 100644 --- a/rules/csistoragecapacity-in-default-namespace/rule.metadata.json +++ b/rules/csistoragecapacity-in-default-namespace/rule.metadata.json @@ -9,7 +9,7 @@ "storage.k8s.io" ], "apiVersions": [ - "v1beta1" + "*" ], "resources": [ "CSIStorageCapacity" From 5858503c327377a7eb0c909e08b35bbd3ce7a0bd Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 13 Mar 2024 11:16:57 +0200 Subject: [PATCH 138/195] add isFixedByNetworkPolicy indicator Signed-off-by: YiscahLevySilas1 --- controls/C-0260-missingnetworkpolicy.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/controls/C-0260-missingnetworkpolicy.json b/controls/C-0260-missingnetworkpolicy.json index 1669d1c61..6f1157c7f 100644 --- a/controls/C-0260-missingnetworkpolicy.json +++ b/controls/C-0260-missingnetworkpolicy.json @@ -11,7 +11,8 @@ "Lateral Movement (Network)" ] } - ] + ], + "isFixedByNetworkPolicy": true }, "description": "This control detects workloads that has no NetworkPolicy configured in labels. 
If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", From 20c58c6185a9c441bb6434a93d520d0993dbaa09 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 19 Mar 2024 09:35:27 +0200 Subject: [PATCH 139/195] rm attack track before release Signed-off-by: YiscahLevySilas1 --- ...ternal-wl-with-cluster-takeover-roles.json | 20 ------------------- 1 file changed, 20 deletions(-) delete mode 100644 attack-tracks/external-wl-with-cluster-takeover-roles.json diff --git a/attack-tracks/external-wl-with-cluster-takeover-roles.json b/attack-tracks/external-wl-with-cluster-takeover-roles.json deleted file mode 100644 index d12d0a139..000000000 --- a/attack-tracks/external-wl-with-cluster-takeover-roles.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "apiVersion": "regolibrary.kubescape/v1alpha1", - "kind": "AttackTrack", - "metadata": { - "name": "external-workload-with-cluster-takeover-roles" - }, - "spec": { - "version": "1.0", - "data": { - "name": "Initial Access", - "description": "An attacker can access the Kubernetes environment.", - "subSteps": [ - { - "name": "Cluster Access", - "description": "An attacker has access to sensitive information and can leverage them by creating pods in the cluster." - } - ] - } - } -} \ No newline at end of file From 9c52252306a043822fb2d5a7f98e8b36d61c8e52 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 19 Mar 2024 12:03:30 +0200 Subject: [PATCH 140/195] add release v2, disable automatic latest release Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release-v2.yaml | 183 ++++++++++++++++++ .../workflows/create-release-without-st.yaml | 128 ------------ .github/workflows/create-release.yaml | 6 +- 3 files changed, 184 insertions(+), 133 deletions(-) create mode 100644 .github/workflows/create-release-v2.yaml delete mode 100644 .github/workflows/create-release-without-st.yaml diff --git a/.github/workflows/create-release-v2.yaml b/.github/workflows/create-release-v2.yaml new file mode 100644 index 000000000..5cfae7f80 --- /dev/null +++ b/.github/workflows/create-release-v2.yaml @@ -0,0 +1,183 @@ +name: 'Create and Publish Tags with Testing and Artifact Handling' + +on: + push: + tags: + - 'v*.*.*-rc.*' + + +env: + REGO_ARTIFACT_KEY_NAME: rego_artifact + REGO_ARTIFACT_PATH: release + +jobs: + test_pr_checks: + permissions: + pull-requests: write + uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main + with: + GO_VERSION: '1.21' + BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... 
+ secrets: inherit + + build-and-rego-test: + needs: [test_pr_checks] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + name: Checkout repo content + + - name: Set up Go 1.21 + uses: actions/setup-go@v2 + with: + go-version: 1.21 + + - name: Test Regos (Golang OPA hot rule compilation) + working-directory: testrunner + run: | + sudo apt update && sudo apt install -y cmake + GOPATH=$(go env GOPATH) make + + - name: Setup Python 3.10.6 + uses: actions/setup-python@v2 + with: + python-version: 3.10.6 + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install requests + + - name: Update frameworks subsections (generating subsections ids) + run: python ./scripts/generate_subsections_ids.py + + - name: Validate control-ID duplications + run: python ./scripts/validations.py + + - name: Generate RegoLibrary artifacts (run export script) + run: python ./scripts/export.py + + - name: Strip Metadata Files Extensions + run: | + cd release + find . -type f \( -name '*.json' -o -name '*.csv' \) | while read f; do mv "$f" "${f%.*}"; done + + - run: ls -laR + + - name: Set outputs + id: set_outputs + run: | + echo "REGO_ARTIFACT_KEY_NAME=${{ env.REGO_ARTIFACT_KEY_NAME }}" >> $GITHUB_OUTPUT + echo "REGO_ARTIFACT_PATH=${{ env.REGO_ARTIFACT_PATH }}" >> $GITHUB_OUTPUT + + - name: Upload artifact + uses: actions/upload-artifact@v2 + with: + name: ${{ env.REGO_ARTIFACT_KEY_NAME }} + path: ${{ env.REGO_ARTIFACT_PATH }}/ + if-no-files-found: error + + # test kubescape e2e flow with tested artifacts + ks-and-rego-test: + uses: kubescape/workflows/.github/workflows/kubescape-cli-e2e-tests.yaml@main + needs: [build-and-rego-test] + if: ${{ (always() && (contains(needs.*.result, 'success')) && !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }} + with: + DOWNLOAD_ARTIFACT_KEY_NAME: ${{ needs.build-and-rego-test.outputs.REGO_ARTIFACT_KEY_NAME }} + BINARY_TESTS: '[ "scan_nsa", + "scan_mitre", + "scan_with_exceptions", + "scan_repository", + "scan_local_file", + "scan_local_glob_files", + "scan_nsa_and_submit_to_backend", + "scan_mitre_and_submit_to_backend", + "scan_local_repository_and_submit_to_backend", + "scan_repository_from_url_and_submit_to_backend", + "host_scanner", + "scan_local_list_of_files", + "scan_compliance_score" + ]' + DOWNLOAD_ARTIFACT_PATH: ${{ needs.build-and-rego-test.outputs.REGO_ARTIFACT_PATH }} + secrets: inherit + + # start release process + create-new-tag-and-release: + needs: [ks-and-rego-test] + if: ${{ (always() && (contains(needs.*.result, 'success')) && !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }} + name: create release and upload assets + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + name: Checkout repository + + - name: 'Generate Release Tag' + id: generate_tag + uses: kubescape/workflows/.github/actions/tag-action@main + with: + ORIGINAL_TAG: ${{ github.ref_name }} + SUB_STRING: "-rc." 
+ + # Create and push the full version tag (e.g., v2.0.1) + - name: Create and Push Full Tag + uses: rickstaa/action-create-tag@v1 + with: + tag: ${{ steps.generate_tag.outputs.NEW_TAG }} + force_push_tag: false + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Generate Short Tag + id: short_tag + run: | + SHORT_TAG=$(echo "${{ steps.generate_tag.outputs.NEW_TAG }}" | grep -oP '^v\d+') + echo "Short tag: $SHORT_TAG" + echo "SHORT_TAG=$SHORT_TAG" >> $GITHUB_ENV + + - name: Force Push Short Tag + uses: rickstaa/action-create-tag@v1 + with: + tag: ${{ env.SHORT_TAG }} + force_push_tag: true + github_token: ${{ secrets.GITHUB_TOKEN }} + + - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2 + id: download-artifact + with: + name: ${{ env.REGO_ARTIFACT_KEY_NAME }} + path: ${{ env.REGO_ARTIFACT_PATH }} + + - name: Create or Update Release and Upload Assets + uses: softprops/action-gh-release@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + tag_name: ${{ env.SHORT_TAG }} + name: ${{ env.SHORT_TAG }} + body: "Automated release for ${{ env.SHORT_TAG}}" + files: ${{ env.REGO_ARTIFACT_PATH }}/* + draft: false + fail_on_unmatched_files: true + prerelease: false + make_latest: "false" + + # Update regolibrary documentation with latest controls and rules. + update-documentation: + needs: [release] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # ratchet:actions/checkout@v3.5.2 + name: checkout repo content + - name: setup python + uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # ratchet:actions/setup-python@v4.6.0 + with: + python-version: 3.8 + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install requests + - name: execute upload script + env: + README_API_KEY: ${{ secrets.README_API_KEY }} + run: |- + python ./scripts/upload-readme.py + - name: execute docs generator script + run: python ./scripts/mk-generator.py # Script to generate controls library documentation diff --git a/.github/workflows/create-release-without-st.yaml b/.github/workflows/create-release-without-st.yaml deleted file mode 100644 index c8e1f65f1..000000000 --- a/.github/workflows/create-release-without-st.yaml +++ /dev/null @@ -1,128 +0,0 @@ -name: create release without system tests -on: - workflow_dispatch: - inputs: - TAG: - description: 'Tag name' - required: true - type: string - -env: - REGO_ARTIFACT_KEY_NAME: rego_artifact - REGO_ARTIFACT_PATH: release - -jobs: - # build regolibrary artifacts / test rego dependencies / test rego unit-tests - build-and-rego-test: - name: Build and test rego artifacts - runs-on: ubuntu-latest - outputs: - NEW_TAG: ${{ steps.tag-calculator.outputs.NEW_TAG }} - REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} - REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} - steps: - - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f - name: checkout repo content - with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - - - id: tag-calculator - uses: kubescape/workflows/.github/actions/tag-action@main - with: - ORIGINAL_TAG: ${{ inputs.TAG }} - SUB_STRING: "-rc" - - # Test using Golang OPA hot rule compilation - - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 - with: - go-version: '1.21' - - - name: setup python - uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa - with: - python-version: 3.10.6 - - name: 
Install dependencies - run: | - python -m pip install --upgrade pip - pip install requests - - # generating subsections ids - - name: Update frameworks subsections - run: python ./scripts/generate_subsections_ids.py - - # validate control-ID duplications - - run: python ./scripts/validations.py - - # run export script to generate regolibrary artifacts - - run: python ./scripts/export.py - - # removing release artifacts file extensions - - name: Strip Metadata Files Extensions - run: | - cd release - find -type f -name '*.json' | while read f; do mv "$f" "${f%.json}"; done - find -type f -name '*.csv' | while read f; do mv "$f" "${f%.csv}"; done - - - run: ls -laR - - - name: Set outputs - id: set_outputs - run: | - echo "REGO_ARTIFACT_KEY_NAME=${{ env.REGO_ARTIFACT_KEY_NAME }}" >> $GITHUB_OUTPUT - echo "REGO_ARTIFACT_PATH=${{ env.REGO_ARTIFACT_PATH }}" >> $GITHUB_OUTPUT - - - uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # ratchet:actions/upload-artifact@v3.1.1 - name: Upload artifact - with: - name: ${{ env.REGO_ARTIFACT_KEY_NAME }} - path: ${{ env.REGO_ARTIFACT_PATH }}/ - if-no-files-found: error - - # start release process - release: - if: ${{ (always() && (contains(needs.*.result, 'success')) && !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }} - name: create release and upload assets - needs: [build-and-rego-test] - runs-on: ubuntu-latest - steps: - - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2 - id: download-artifact - with: - name: ${{ env.REGO_ARTIFACT_KEY_NAME }} - path: ${{ env.REGO_ARTIFACT_PATH }} - - - name: Create Release and upload assets - id: create_release_upload_assets - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 - with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - name: Release ${{ needs.build-and-rego-test.outputs.NEW_TAG }} - tag_name: ${{ needs.build-and-rego-test.outputs.NEW_TAG }} - draft: false - fail_on_unmatched_files: true - prerelease: false - files: '${{ env.REGO_ARTIFACT_PATH }}/*' - - # Update regolibrary documentation with latest controls and rules. 
- update-documentation: - needs: [release] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # ratchet:actions/checkout@v3.5.2 - name: checkout repo content - - name: setup python - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # ratchet:actions/setup-python@v4.6.0 - with: - python-version: 3.8 - - name: install dependencies - run: | - python -m pip install --upgrade pip - pip install requests - - name: execute upload script - env: - README_API_KEY: ${{ secrets.README_API_KEY }} - run: |- - python ./scripts/upload-readme.py - - name: execute docs generator script - run: python ./scripts/mk-generator.py # Script to generate controls library documentation \ No newline at end of file diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml index 6a3eca62f..087ec2233 100644 --- a/.github/workflows/create-release.yaml +++ b/.github/workflows/create-release.yaml @@ -6,11 +6,7 @@ on: description: 'Tag name' required: true type: string - - push: - tags: - - 'v*.*.*-rc.*' - + env: REGO_ARTIFACT_KEY_NAME: rego_artifact REGO_ARTIFACT_PATH: release From 14ba9f8b58f746a1d65ddfe944838bba0a9f2334 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 19 Mar 2024 13:32:04 +0200 Subject: [PATCH 141/195] rm attack track before release Signed-off-by: YiscahLevySilas1 --- controls/C-0256-exposuretointernet.json | 6 ------ controls/C-0267-workloadwithclustertakeoverroles.json | 11 +---------- 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/controls/C-0256-exposuretointernet.json b/controls/C-0256-exposuretointernet.json index 0abb72f5b..8c9776554 100644 --- a/controls/C-0256-exposuretointernet.json +++ b/controls/C-0256-exposuretointernet.json @@ -17,12 +17,6 @@ "Initial Access" ] }, - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Initial Access" - ] - }, { "attackTrack": "external-database-without-authentication", "categories": [ diff --git a/controls/C-0267-workloadwithclustertakeoverroles.json b/controls/C-0267-workloadwithclustertakeoverroles.json index e048787a9..708016d0c 100644 --- a/controls/C-0267-workloadwithclustertakeoverroles.json +++ b/controls/C-0267-workloadwithclustertakeoverroles.json @@ -4,16 +4,7 @@ "controlTypeTags": [ "security" ], - "attackTracks": [ - { - "attackTrack": "external-workload-with-cluster-takeover-roles", - "categories": [ - "Cluster Access" - ], - "displayRelatedResources": true, - "clickableResourceKind": "ServiceAccount" - } - ] + "attackTracks": [] }, "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", "remediation": "You should apply least privilege principle. 
Make sure each service account has only the permissions that are absolutely necessary.", From 8e7e6a0ce759f681e0a008ed86f62b168a05eb46 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 20 Mar 2024 08:46:23 +0200 Subject: [PATCH 142/195] add workflow_dispatch Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release-v2.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/create-release-v2.yaml b/.github/workflows/create-release-v2.yaml index 5cfae7f80..a059ed611 100644 --- a/.github/workflows/create-release-v2.yaml +++ b/.github/workflows/create-release-v2.yaml @@ -1,6 +1,13 @@ name: 'Create and Publish Tags with Testing and Artifact Handling' on: + workflow_dispatch: + inputs: + TAG: + description: 'Tag name' + required: true + type: string + push: tags: - 'v*.*.*-rc.*' From cd516e4cceb1b9770324f50288e906c12db3f36f Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 20 Mar 2024 08:49:36 +0200 Subject: [PATCH 143/195] remove old release workflow Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release.yaml | 170 -------------------------- 1 file changed, 170 deletions(-) delete mode 100644 .github/workflows/create-release.yaml diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml deleted file mode 100644 index 087ec2233..000000000 --- a/.github/workflows/create-release.yaml +++ /dev/null @@ -1,170 +0,0 @@ -name: create release -on: - workflow_dispatch: - inputs: - TAG: - description: 'Tag name' - required: true - type: string - -env: - REGO_ARTIFACT_KEY_NAME: rego_artifact - REGO_ARTIFACT_PATH: release - -jobs: - # main job of testing and building the env. - test_pr_checks: - permissions: - pull-requests: write - uses: kubescape/workflows/.github/workflows/go-basic-tests.yaml@main - with: - GO_VERSION: '1.21' - BUILD_PATH: github.com/kubescape/regolibrary/gitregostore/... 
- secrets: inherit - - # build regolibrary artifacts / test rego dependencies / test rego unit-tests - build-and-rego-test: - needs: [test_pr_checks] - name: Build and test rego artifacts - runs-on: ubuntu-latest - outputs: - NEW_TAG: ${{ steps.tag-calculator.outputs.NEW_TAG }} - REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} - REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} - steps: - - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f - name: checkout repo content - with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - - - id: tag-calculator - uses: kubescape/workflows/.github/actions/tag-action@main - with: - ORIGINAL_TAG: ${{ inputs.TAG }} - SUB_STRING: "-rc" - - # Test using Golang OPA hot rule compilation - - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 - with: - go-version: '1.21' - - - name: Test Regoes - working-directory: testrunner - run: | - apt update && apt install -y cmake - GOPATH=$(go env GOPATH) make - - - name: setup python - uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa - with: - python-version: 3.10.6 - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install requests - - # generating subsections ids - - name: Update frameworks subsections - run: python ./scripts/generate_subsections_ids.py - - # validate control-ID duplications - - run: python ./scripts/validations.py - - # run export script to generate regolibrary artifacts - - run: python ./scripts/export.py - - # removing release artifacts file extensions - - name: Strip Metadata Files Extensions - run: | - cd release - find -type f -name '*.json' | while read f; do mv "$f" "${f%.json}"; done - find -type f -name '*.csv' | while read f; do mv "$f" "${f%.csv}"; done - - - run: ls -laR - - - name: Set outputs - id: set_outputs - run: | - echo "REGO_ARTIFACT_KEY_NAME=${{ env.REGO_ARTIFACT_KEY_NAME }}" >> $GITHUB_OUTPUT - echo "REGO_ARTIFACT_PATH=${{ env.REGO_ARTIFACT_PATH }}" >> $GITHUB_OUTPUT - - - uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # ratchet:actions/upload-artifact@v3.1.1 - name: Upload artifact - with: - name: ${{ env.REGO_ARTIFACT_KEY_NAME }} - path: ${{ env.REGO_ARTIFACT_PATH }}/ - if-no-files-found: error - - # test kubescape e2e flow with tested artifacts - ks-and-rego-test: - uses: kubescape/workflows/.github/workflows/kubescape-cli-e2e-tests.yaml@main - needs: [build-and-rego-test] - if: ${{ (always() && (contains(needs.*.result, 'success')) && !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }} - with: - DOWNLOAD_ARTIFACT_KEY_NAME: ${{ needs.build-and-rego-test.outputs.REGO_ARTIFACT_KEY_NAME }} - BINARY_TESTS: '[ "scan_nsa", - "scan_mitre", - "scan_with_exceptions", - "scan_repository", - "scan_local_file", - "scan_local_glob_files", - "scan_nsa_and_submit_to_backend", - "scan_mitre_and_submit_to_backend", - "scan_local_repository_and_submit_to_backend", - "scan_repository_from_url_and_submit_to_backend", - "host_scanner", - "scan_local_list_of_files", - "scan_compliance_score" - ]' - DOWNLOAD_ARTIFACT_PATH: ${{ needs.build-and-rego-test.outputs.REGO_ARTIFACT_PATH }} - secrets: inherit - - # start release process - release: - needs: [ks-and-rego-test] - if: ${{ (always() && (contains(needs.*.result, 'success')) && !(contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 
'cancelled'))) }} - name: create release and upload assets - runs-on: ubuntu-latest - steps: - - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2 - id: download-artifact - with: - name: ${{ env.REGO_ARTIFACT_KEY_NAME }} - path: ${{ env.REGO_ARTIFACT_PATH }} - - - name: Create Release and upload assets - id: create_release_upload_assets - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 - with: - token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - name: Release ${{ needs.build-and-rego-test.outputs.NEW_TAG }} - tag_name: ${{ needs.build-and-rego-test.outputs.NEW_TAG }} - body: ${{ github.event.pull_request.body }} - draft: false - fail_on_unmatched_files: true - prerelease: false - files: '${{ env.REGO_ARTIFACT_PATH }}/*' - - # Update regolibrary documentation with latest controls and rules. - update-documentation: - needs: [release] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # ratchet:actions/checkout@v3.5.2 - name: checkout repo content - - name: setup python - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # ratchet:actions/setup-python@v4.6.0 - with: - python-version: 3.8 - - name: install dependencies - run: | - python -m pip install --upgrade pip - pip install requests - - name: execute upload script - env: - README_API_KEY: ${{ secrets.README_API_KEY }} - run: |- - python ./scripts/upload-readme.py - - name: execute docs generator script - run: python ./scripts/mk-generator.py # Script to generate controls library documentation From 1d84ac7186ea8fc4c181e246a18d6809d2f2066b Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 20 Mar 2024 14:48:40 +0200 Subject: [PATCH 144/195] expand condition to match releases that aren't latest Signed-off-by: YiscahLevySilas1 --- gitregostore/datastructures.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gitregostore/datastructures.go b/gitregostore/datastructures.go index 49b21983c..9f24d57a9 100644 --- a/gitregostore/datastructures.go +++ b/gitregostore/datastructures.go @@ -56,7 +56,7 @@ func newGitRegoStore(baseUrl string, owner string, repository string, path strin watch = true } - if strings.Contains(tag, "latest") || strings.Contains(tag, "download") { + if strings.Contains(tag, "latest") || strings.Contains(tag, "download") || strings.Contains(path, "releases") { // TODO - This condition was added to avoid dependency on updating productions configs on deployment. // Once production configs are updated (branch set to ""), this condition can be removed. if strings.ToLower(branch) == "master" { From 502c246136d210287a29481274fb5b0028cb41de Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 20 Mar 2024 16:25:45 +0200 Subject: [PATCH 145/195] fix workflow Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release-v2.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/create-release-v2.yaml b/.github/workflows/create-release-v2.yaml index a059ed611..bb8257680 100644 --- a/.github/workflows/create-release-v2.yaml +++ b/.github/workflows/create-release-v2.yaml @@ -7,7 +7,7 @@ on: description: 'Tag name' required: true type: string - + push: tags: - 'v*.*.*-rc.*' @@ -168,7 +168,7 @@ jobs: # Update regolibrary documentation with latest controls and rules. 
update-documentation: - needs: [release] + needs: [create-new-tag-and-release] runs-on: ubuntu-latest steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # ratchet:actions/checkout@v3.5.2 From ae62d2c6c1ff490ddfcc96de688d791920b62db4 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 21 Mar 2024 08:41:46 +0200 Subject: [PATCH 146/195] fix workflow Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release-v2.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/create-release-v2.yaml b/.github/workflows/create-release-v2.yaml index bb8257680..495579f4a 100644 --- a/.github/workflows/create-release-v2.yaml +++ b/.github/workflows/create-release-v2.yaml @@ -105,7 +105,7 @@ jobs: "scan_local_list_of_files", "scan_compliance_score" ]' - DOWNLOAD_ARTIFACT_PATH: ${{ needs.build-and-rego-test.outputs.REGO_ARTIFACT_PATH }} + DOWNLOAD_ARTIFACT_PATH: ${{ env.REGO_ARTIFACT_PATH }} secrets: inherit # start release process From 9ac7af846680f9cb279d839d3e29bce66b90e8a7 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 21 Mar 2024 08:54:16 +0200 Subject: [PATCH 147/195] fix workflow Signed-off-by: YiscahLevySilas1 --- .github/workflows/create-release-v2.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/create-release-v2.yaml b/.github/workflows/create-release-v2.yaml index 495579f4a..9333f00ea 100644 --- a/.github/workflows/create-release-v2.yaml +++ b/.github/workflows/create-release-v2.yaml @@ -30,6 +30,9 @@ jobs: build-and-rego-test: needs: [test_pr_checks] runs-on: ubuntu-latest + outputs: + REGO_ARTIFACT_KEY_NAME: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_KEY_NAME }} + REGO_ARTIFACT_PATH: ${{ steps.set_outputs.outputs.REGO_ARTIFACT_PATH }} steps: - uses: actions/checkout@v2 name: Checkout repo content @@ -105,7 +108,7 @@ jobs: "scan_local_list_of_files", "scan_compliance_score" ]' - DOWNLOAD_ARTIFACT_PATH: ${{ env.REGO_ARTIFACT_PATH }} + DOWNLOAD_ARTIFACT_PATH: ${{ needs.build-and-rego-test.outputs.REGO_ARTIFACT_PATH }} secrets: inherit # start release process From 6d9114d5e0e909061b485ddaa3f1ed8048f09f18 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 <80635572+YiscahLevySilas1@users.noreply.github.com> Date: Thu, 21 Mar 2024 11:06:07 +0200 Subject: [PATCH 148/195] Revert "removed control C-0264 from soc2 FW" --- frameworks/soc2.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/frameworks/soc2.json b/frameworks/soc2.json index 9ff8422c5..03aa66125 100644 --- a/frameworks/soc2.json +++ b/frameworks/soc2.json @@ -60,6 +60,16 @@ "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." } + }, + { + "controlID": "C-0264", + "patch": { + "name": "Data in rest encryption - Persistent Volumes are encrypted (CC1.1,CC6.7)", + "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", + "long_description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server." 
+ } } + + ] } From d9b9ec39de5d786ad708ef7c327297b3d36be5fa Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 25 Mar 2024 12:13:34 +0200 Subject: [PATCH 149/195] add constructor for regolibrary V2 Signed-off-by: YiscahLevySilas1 --- gitregostore/datastructures.go | 7 +++++++ gitregostore/gitstoremethods_test.go | 12 ++++++++++++ 2 files changed, 19 insertions(+) diff --git a/gitregostore/datastructures.go b/gitregostore/datastructures.go index 9f24d57a9..5bc357be2 100644 --- a/gitregostore/datastructures.go +++ b/gitregostore/datastructures.go @@ -98,6 +98,13 @@ func (gs *GitRegoStore) SetRegoObjects() error { return err } +// NewDefaultGitRegoStore - generates git store object for production regolibrary release files. +// Release files source: "https://github.com/kubescape/regolibrary/releases/latest/download" +func NewGitRegoStoreV2(frequency int) *GitRegoStore { + gs := NewGitRegoStore("https://github.com", "kubescape", "regolibrary", "releases", "download/v2", "", frequency) + return gs +} + // NewDefaultGitRegoStore - generates git store object for production regolibrary release files. // Release files source: "https://github.com/kubescape/regolibrary/releases/latest/download" func NewDefaultGitRegoStore(frequency int) *GitRegoStore { diff --git a/gitregostore/gitstoremethods_test.go b/gitregostore/gitstoremethods_test.go index 400f96bdf..b25797925 100644 --- a/gitregostore/gitstoremethods_test.go +++ b/gitregostore/gitstoremethods_test.go @@ -222,6 +222,18 @@ func gs_tests(t *testing.T, gs *GitRegoStore) { }) } +func TestGetPoliciesMethodsNewV2(t *testing.T) { + t.Parallel() + + gs := NewGitRegoStoreV2(-1) + t.Run("shoud set objects in rego store", func(t *testing.T) { + require.NoError(t, gs.SetRegoObjects()) + }) + + gs_tests(t, gs) + +} + func TestGetPoliciesMethodsNew(t *testing.T) { t.Parallel() From edf95e837c09e40d76bd20701fe8b2009fa61fbf Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 25 Mar 2024 14:39:37 +0200 Subject: [PATCH 150/195] upgrade module to v2 Signed-off-by: YiscahLevySilas1 --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index c1f88515f..cecf821c0 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/kubescape/regolibrary +module github.com/kubescape/regolibrary/v2 go 1.21 From f7dca44d2a3d32b8e0e7bb0268e17f38d72ba9b3 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 25 Mar 2024 14:41:57 +0200 Subject: [PATCH 151/195] fix comment Signed-off-by: YiscahLevySilas1 --- gitregostore/datastructures.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gitregostore/datastructures.go b/gitregostore/datastructures.go index 5bc357be2..47441fe52 100644 --- a/gitregostore/datastructures.go +++ b/gitregostore/datastructures.go @@ -98,8 +98,8 @@ func (gs *GitRegoStore) SetRegoObjects() error { return err } -// NewDefaultGitRegoStore - generates git store object for production regolibrary release files. -// Release files source: "https://github.com/kubescape/regolibrary/releases/latest/download" +// NewGitRegoStoreV2 - generates git store object for production v2 regolibrary release files. 
+// Release files source: "https://github.com/kubescape/regolibrary/releases/tag/v2" func NewGitRegoStoreV2(frequency int) *GitRegoStore { gs := NewGitRegoStore("https://github.com", "kubescape", "regolibrary", "releases", "download/v2", "", frequency) return gs From 3f87f91d5064447907b6073315290a28c8eaa6fd Mon Sep 17 00:00:00 2001 From: kooomix Date: Tue, 26 Mar 2024 11:43:25 +0200 Subject: [PATCH 152/195] Fix privilege escalation and privileged container issues Signed-off-by: kooomix --- rules/rule-allow-privilege-escalation/raw.rego | 17 +++++++++++------ .../test/cronjob/expected.json | 15 +++++++++++++-- .../test/workloads/expected.json | 7 ++++++- 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/rules/rule-allow-privilege-escalation/raw.rego b/rules/rule-allow-privilege-escalation/raw.rego index 41cf0a6f2..9663e9dd9 100644 --- a/rules/rule-allow-privilege-escalation/raw.rego +++ b/rules/rule-allow-privilege-escalation/raw.rego @@ -81,7 +81,9 @@ is_allow_privilege_escalation_container(container, i, start_of_path) = [failed_p psps := [psp | psp= input[_]; psp.kind == "PodSecurityPolicy"] count(psps) == 0 failed_path = "" - fixPath = {"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)]), "value":"false"} + fixPath = [{"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)]), "value":"false"}, + {"path": sprintf("%vcontainers[%v].securityContext.privileged", [start_of_path, format_int(i, 10)]), "value":"false"} + ] } is_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] { @@ -92,7 +94,10 @@ is_allow_privilege_escalation_container(container, i, start_of_path) = [failed_p psp := psps[_] not psp.spec.allowPrivilegeEscalation == false failed_path = "" - fixPath = {"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)]), "value":"false"} + fixPath = [{"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)]), "value":"false"}, + {"path": sprintf("%vcontainers[%v].securityContext.privileged", [start_of_path, format_int(i, 10)]), "value":"false"} + + ] } @@ -101,7 +106,7 @@ is_allow_privilege_escalation_container(container, i, start_of_path) = [failed_p psps := [psp | psp= input[_]; psp.kind == "PodSecurityPolicy"] count(psps) == 0 fixPath = "" - failed_path = sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)]) + failed_path = [sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)])] } is_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] { @@ -111,15 +116,15 @@ is_allow_privilege_escalation_container(container, i, start_of_path)= [failed_pa psp := psps[_] not psp.spec.allowPrivilegeEscalation == false fixPath = "" - failed_path = sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)]) + failed_path = [sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)])] } - get_failed_path(paths) = [paths[0]] { + get_failed_path(paths) = paths[0] { paths[0] != "" } else = [] -get_fixed_path(paths) = [paths[1]] { +get_fixed_path(paths) = paths[1] { paths[1] != "" } else = [] diff --git a/rules/rule-allow-privilege-escalation/test/cronjob/expected.json 
b/rules/rule-allow-privilege-escalation/test/cronjob/expected.json index 5e3ef1b1e..e9f6ca325 100644 --- a/rules/rule-allow-privilege-escalation/test/cronjob/expected.json +++ b/rules/rule-allow-privilege-escalation/test/cronjob/expected.json @@ -5,7 +5,12 @@ "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation", "value": "false" - }], + }, + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.privileged", + "value": "false" + } +], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, @@ -25,7 +30,13 @@ "fixPaths": [{ "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", "value": "false" - }], + }, + { + "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.privileged", + "value": "false" + } + +], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, diff --git a/rules/rule-allow-privilege-escalation/test/workloads/expected.json b/rules/rule-allow-privilege-escalation/test/workloads/expected.json index d8ffd3d4f..f4cdaff01 100644 --- a/rules/rule-allow-privilege-escalation/test/workloads/expected.json +++ b/rules/rule-allow-privilege-escalation/test/workloads/expected.json @@ -25,7 +25,12 @@ "fixPaths": [{ "path": "spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", "value": "false" - }], + + }, + { + "path": "spec.template.spec.containers[1].securityContext.privileged", + "value": "false" + }], "ruleStatus": "", "packagename": "armo_builtins", "alertScore": 7, From f5e71bae039df2d9570fa29789bd10b40d36d26a Mon Sep 17 00:00:00 2001 From: kooomix Date: Tue, 26 Mar 2024 14:27:43 +0200 Subject: [PATCH 153/195] fix C-0074 Signed-off-by: kooomix --- .../raw.rego | 31 +++++++++++++------ .../test/cronjob-containerd/expected.json | 7 +++-- .../test/cronjob-crio/expected.json | 6 ++-- .../test/cronjob/expected.json | 7 +++-- .../test/pod-containerd/expected.json | 6 ++-- .../test/pod-crio/expected.json | 6 ++-- .../test/pod/expected.json | 6 ++-- .../test/workloads-containerd/expected.json | 4 +-- .../test/workloads-crio/expected.json | 4 +-- .../test/workloads/expected.json | 4 +-- 10 files changed, 54 insertions(+), 27 deletions(-) diff --git a/rules/containers-mounting-docker-socket/raw.rego b/rules/containers-mounting-docker-socket/raw.rego index 9c74778af..b52a6b453 100644 --- a/rules/containers-mounting-docker-socket/raw.rego +++ b/rules/containers-mounting-docker-socket/raw.rego @@ -7,12 +7,15 @@ deny[msga] { volume := pod.spec.volumes[i] host_path := volume.hostPath is_runtime_socket_mounting(host_path) - path := sprintf("spec.volumes[%v].hostPath.path", [format_int(i, 10)]) + path := sprintf("spec.volumes[%v]", [format_int(i, 10)]) + volumeMounts := pod.spec.containers[j].volumeMounts + pathMounts = volume_mounts(volume.name, volumeMounts, sprintf("spec.containers[%v]", [j])) + finalPath := array.concat([path], pathMounts) msga := { "alertMessage": sprintf("volume: %v in pod: %v has mounting to Docker internals.", [volume.name, pod.metadata.name]), "packagename": "armo_builtins", - "deletePaths": [path], - "failedPaths": [path], + "deletePaths":finalPath, + "failedPaths": finalPath, "fixPaths":[], "alertScore": 5, "alertObject": { @@ -30,12 +33,15 @@ deny[msga] { volume := wl.spec.template.spec.volumes[i] host_path := volume.hostPath is_runtime_socket_mounting(host_path) - path := sprintf("spec.template.spec.volumes[%v].hostPath.path", [format_int(i, 10)]) + path := 
sprintf("spec.template.spec.volumes[%v]", [format_int(i, 10)]) + volumeMounts := wl.spec.template.spec.containers[j].volumeMounts + pathMounts = volume_mounts(volume.name,volumeMounts, sprintf("spec.template.spec.containers[%v]", [j])) + finalPath := array.concat([path], pathMounts) msga := { "alertMessage": sprintf("volume: %v in %v: %v has mounting to Docker internals.", [ volume.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", - "deletePaths": [path], - "failedPaths": [path], + "deletePaths": finalPath, + "failedPaths": finalPath, "fixPaths":[], "alertScore": 5, "alertObject": { @@ -51,12 +57,15 @@ deny[msga] { volume = wl.spec.jobTemplate.spec.template.spec.volumes[i] host_path := volume.hostPath is_runtime_socket_mounting(host_path) - path := sprintf("spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path", [format_int(i, 10)]) + path := sprintf("spec.jobTemplate.spec.template.spec.volumes[%v]", [format_int(i, 10)]) + volumeMounts := wl.spec.jobTemplate.spec.template.spec.containers[j].volumeMounts + pathMounts = volume_mounts(volume.name,volumeMounts, sprintf("spec.jobTemplate.spec.template.spec.containers[%v]", [j])) + finalPath := array.concat([path], pathMounts) msga := { "alertMessage": sprintf("volume: %v in %v: %v has mounting to Docker internals.", [ volume.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", - "deletePaths": [path], - "failedPaths": [path], + "deletePaths": finalPath, + "failedPaths": finalPath, "fixPaths":[], "alertScore": 5, "alertObject": { @@ -65,6 +74,10 @@ deny[msga] { } } +volume_mounts(name, volume_mounts, str) = [path] { + name == volume_mounts[j].name + path := sprintf("%s.volumeMounts[%v]", [str, j]) +} else = [] is_runtime_socket_mounting(host_path) { host_path.path == "/var/run/docker.sock" diff --git a/rules/containers-mounting-docker-socket/test/cronjob-containerd/expected.json b/rules/containers-mounting-docker-socket/test/cronjob-containerd/expected.json index 436eb3bfc..f87bbe7f8 100644 --- a/rules/containers-mounting-docker-socket/test/cronjob-containerd/expected.json +++ b/rules/containers-mounting-docker-socket/test/cronjob-containerd/expected.json @@ -2,10 +2,13 @@ { "alertMessage": "volume: test-volume in CronJob: hello has mounting to Docker internals.", "deletePaths": [ - "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + "spec.jobTemplate.spec.template.spec.volumes[0]", + "spec.jobTemplate.spec.template.spec.containers[0].volumeMounts[0]" + ], "failedPaths": [ - "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + "spec.jobTemplate.spec.template.spec.volumes[0]", + "spec.jobTemplate.spec.template.spec.containers[0].volumeMounts[0]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/containers-mounting-docker-socket/test/cronjob-crio/expected.json b/rules/containers-mounting-docker-socket/test/cronjob-crio/expected.json index 436eb3bfc..b14f08fb7 100644 --- a/rules/containers-mounting-docker-socket/test/cronjob-crio/expected.json +++ b/rules/containers-mounting-docker-socket/test/cronjob-crio/expected.json @@ -2,10 +2,12 @@ { "alertMessage": "volume: test-volume in CronJob: hello has mounting to Docker internals.", "deletePaths": [ - "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + "spec.jobTemplate.spec.template.spec.volumes[0]", + "spec.jobTemplate.spec.template.spec.containers[0].volumeMounts[0]" ], "failedPaths": [ - "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + "spec.jobTemplate.spec.template.spec.volumes[0]", + 
"spec.jobTemplate.spec.template.spec.containers[0].volumeMounts[0]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/containers-mounting-docker-socket/test/cronjob/expected.json b/rules/containers-mounting-docker-socket/test/cronjob/expected.json index 436eb3bfc..f87bbe7f8 100644 --- a/rules/containers-mounting-docker-socket/test/cronjob/expected.json +++ b/rules/containers-mounting-docker-socket/test/cronjob/expected.json @@ -2,10 +2,13 @@ { "alertMessage": "volume: test-volume in CronJob: hello has mounting to Docker internals.", "deletePaths": [ - "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + "spec.jobTemplate.spec.template.spec.volumes[0]", + "spec.jobTemplate.spec.template.spec.containers[0].volumeMounts[0]" + ], "failedPaths": [ - "spec.jobTemplate.spec.template.spec.volumes[0].hostPath.path" + "spec.jobTemplate.spec.template.spec.volumes[0]", + "spec.jobTemplate.spec.template.spec.containers[0].volumeMounts[0]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/containers-mounting-docker-socket/test/pod-containerd/expected.json b/rules/containers-mounting-docker-socket/test/pod-containerd/expected.json index 602a58262..86e5dda70 100644 --- a/rules/containers-mounting-docker-socket/test/pod-containerd/expected.json +++ b/rules/containers-mounting-docker-socket/test/pod-containerd/expected.json @@ -2,10 +2,12 @@ { "alertMessage": "volume: test-volume in pod: test-pd has mounting to Docker internals.", "deletePaths": [ - "spec.volumes[0].hostPath.path" + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" ], "failedPaths": [ - "spec.volumes[0].hostPath.path" + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/containers-mounting-docker-socket/test/pod-crio/expected.json b/rules/containers-mounting-docker-socket/test/pod-crio/expected.json index 602a58262..86e5dda70 100644 --- a/rules/containers-mounting-docker-socket/test/pod-crio/expected.json +++ b/rules/containers-mounting-docker-socket/test/pod-crio/expected.json @@ -2,10 +2,12 @@ { "alertMessage": "volume: test-volume in pod: test-pd has mounting to Docker internals.", "deletePaths": [ - "spec.volumes[0].hostPath.path" + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" ], "failedPaths": [ - "spec.volumes[0].hostPath.path" + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/containers-mounting-docker-socket/test/pod/expected.json b/rules/containers-mounting-docker-socket/test/pod/expected.json index 602a58262..86e5dda70 100644 --- a/rules/containers-mounting-docker-socket/test/pod/expected.json +++ b/rules/containers-mounting-docker-socket/test/pod/expected.json @@ -2,10 +2,12 @@ { "alertMessage": "volume: test-volume in pod: test-pd has mounting to Docker internals.", "deletePaths": [ - "spec.volumes[0].hostPath.path" + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" ], "failedPaths": [ - "spec.volumes[0].hostPath.path" + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/containers-mounting-docker-socket/test/workloads-containerd/expected.json b/rules/containers-mounting-docker-socket/test/workloads-containerd/expected.json index e142cca61..17d23ad59 100644 --- a/rules/containers-mounting-docker-socket/test/workloads-containerd/expected.json +++ b/rules/containers-mounting-docker-socket/test/workloads-containerd/expected.json @@ -2,10 +2,10 @@ { "alertMessage": "volume: test-volume2 in 
Deployment: my-deployment has mounting to Docker internals.", "deletePaths": [ - "spec.template.spec.volumes[1].hostPath.path" + "spec.template.spec.volumes[1]" ], "failedPaths": [ - "spec.template.spec.volumes[1].hostPath.path" + "spec.template.spec.volumes[1]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/containers-mounting-docker-socket/test/workloads-crio/expected.json b/rules/containers-mounting-docker-socket/test/workloads-crio/expected.json index e142cca61..17d23ad59 100644 --- a/rules/containers-mounting-docker-socket/test/workloads-crio/expected.json +++ b/rules/containers-mounting-docker-socket/test/workloads-crio/expected.json @@ -2,10 +2,10 @@ { "alertMessage": "volume: test-volume2 in Deployment: my-deployment has mounting to Docker internals.", "deletePaths": [ - "spec.template.spec.volumes[1].hostPath.path" + "spec.template.spec.volumes[1]" ], "failedPaths": [ - "spec.template.spec.volumes[1].hostPath.path" + "spec.template.spec.volumes[1]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/containers-mounting-docker-socket/test/workloads/expected.json b/rules/containers-mounting-docker-socket/test/workloads/expected.json index e142cca61..17d23ad59 100644 --- a/rules/containers-mounting-docker-socket/test/workloads/expected.json +++ b/rules/containers-mounting-docker-socket/test/workloads/expected.json @@ -2,10 +2,10 @@ { "alertMessage": "volume: test-volume2 in Deployment: my-deployment has mounting to Docker internals.", "deletePaths": [ - "spec.template.spec.volumes[1].hostPath.path" + "spec.template.spec.volumes[1]" ], "failedPaths": [ - "spec.template.spec.volumes[1].hostPath.path" + "spec.template.spec.volumes[1]" ], "fixPaths": [], "ruleStatus": "", From 9abea86e8e997954f5256078e364c3c6187a68c8 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 1 Apr 2024 11:17:46 +0300 Subject: [PATCH 154/195] return fix path in every case Signed-off-by: YiscahLevySilas1 --- .../rule-allow-privilege-escalation/raw.rego | 61 +++------- .../test/cronjob/expected.json | 96 ++++++++-------- .../test/pod/expected.json | 49 ++++---- .../test/workloads/expected.json | 105 ++++++++++-------- 4 files changed, 156 insertions(+), 155 deletions(-) diff --git a/rules/rule-allow-privilege-escalation/raw.rego b/rules/rule-allow-privilege-escalation/raw.rego index 9663e9dd9..4b2e6715e 100644 --- a/rules/rule-allow-privilege-escalation/raw.rego +++ b/rules/rule-allow-privilege-escalation/raw.rego @@ -7,17 +7,14 @@ deny[msga] { pod.kind == "Pod" container := pod.spec.containers[i] start_of_path := "spec." - result := is_allow_privilege_escalation_container(container, i, start_of_path) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + is_allow_privilege_escalation_container(container) + fixPath := get_fix_path(i, start_of_path) msga := { "alertMessage": sprintf("container: %v in pod: %v allow privilege escalation", [container.name, pod.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixed_path, + "fixPaths": fixPath, "alertObject": { "k8sApiObjects": [pod] } @@ -32,17 +29,14 @@ deny[msga] { spec_template_spec_patterns[wl.kind] container := wl.spec.template.spec.containers[i] start_of_path := "spec.template.spec." 
- result := is_allow_privilege_escalation_container(container, i, start_of_path) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + is_allow_privilege_escalation_container(container) + fixPath := get_fix_path(i, start_of_path) msga := { "alertMessage": sprintf("container :%v in %v: %v allow privilege escalation", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixed_path, + "fixPaths": fixPath, "alertObject": { "k8sApiObjects": [wl] } @@ -56,17 +50,14 @@ deny[msga] { wl.kind == "CronJob" container = wl.spec.jobTemplate.spec.template.spec.containers[i] start_of_path := "spec.jobTemplate.spec.template.spec." - result := is_allow_privilege_escalation_container(container, i, start_of_path) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + is_allow_privilege_escalation_container(container) + fixPath := get_fix_path(i, start_of_path) msga := { "alertMessage": sprintf("container :%v in %v: %v allow privilege escalation", [container.name, wl.kind, wl.metadata.name]), "packagename": "armo_builtins", "alertScore": 7, - "reviewPaths": failed_path, - "failedPaths": failed_path, - "fixPaths": fixed_path, + "fixPaths": fixPath, "alertObject": { "k8sApiObjects": [wl] } @@ -75,56 +66,38 @@ deny[msga] { -is_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] { +is_allow_privilege_escalation_container(container) { not container.securityContext.allowPrivilegeEscalation == false not container.securityContext.allowPrivilegeEscalation == true psps := [psp | psp= input[_]; psp.kind == "PodSecurityPolicy"] count(psps) == 0 - failed_path = "" - fixPath = [{"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)]), "value":"false"}, - {"path": sprintf("%vcontainers[%v].securityContext.privileged", [start_of_path, format_int(i, 10)]), "value":"false"} - ] } -is_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] { +is_allow_privilege_escalation_container(container) { not container.securityContext.allowPrivilegeEscalation == false not container.securityContext.allowPrivilegeEscalation == true psps := [psp | psp= input[_]; psp.kind == "PodSecurityPolicy"] count(psps) > 0 psp := psps[_] not psp.spec.allowPrivilegeEscalation == false - failed_path = "" - fixPath = [{"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)]), "value":"false"}, - {"path": sprintf("%vcontainers[%v].securityContext.privileged", [start_of_path, format_int(i, 10)]), "value":"false"} - - ] } -is_allow_privilege_escalation_container(container, i, start_of_path) = [failed_path, fixPath] { +is_allow_privilege_escalation_container(container) { container.securityContext.allowPrivilegeEscalation == true psps := [psp | psp= input[_]; psp.kind == "PodSecurityPolicy"] count(psps) == 0 - fixPath = "" - failed_path = [sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)])] } -is_allow_privilege_escalation_container(container, i, start_of_path)= [failed_path, fixPath] { +is_allow_privilege_escalation_container(container) { container.securityContext.allowPrivilegeEscalation == true psps := [psp | psp= input[_]; psp.kind == "PodSecurityPolicy"] count(psps) > 0 psp := psps[_] not psp.spec.allowPrivilegeEscalation == false - fixPath = "" - 
failed_path = [sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, format_int(i, 10)])] } - get_failed_path(paths) = paths[0] { - paths[0] != "" -} else = [] - - -get_fixed_path(paths) = paths[1] { - paths[1] != "" -} else = [] - +get_fix_path(i, start_of_path) = fixPath { + fixPath = [{"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, i]), "value":"false"}, + {"path": sprintf("%vcontainers[%v].securityContext.privileged", [start_of_path, i]), "value":"false"}] +} diff --git a/rules/rule-allow-privilege-escalation/test/cronjob/expected.json b/rules/rule-allow-privilege-escalation/test/cronjob/expected.json index e9f6ca325..c9ff558a0 100644 --- a/rules/rule-allow-privilege-escalation/test/cronjob/expected.json +++ b/rules/rule-allow-privilege-escalation/test/cronjob/expected.json @@ -1,52 +1,56 @@ -[{ - "alertMessage": "container :mysql in CronJob: hello allow privilege escalation", - "reviewPaths": [], - "failedPaths": [], - "fixPaths": [{ - "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation", - "value": "false" - }, +[ { - "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.privileged", - "value": "false" - } -], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "hello" + "alertMessage": "container :mysql in CronJob: hello allow privilege escalation", + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation", + "value": "false" + }, + { + "path": "spec.jobTemplate.spec.template.spec.containers[0].securityContext.privileged", + "value": "false" } - }] - } -}, { - "alertMessage": "container :php in CronJob: hello allow privilege escalation", - "reviewPaths": [], - "failedPaths": [], - "fixPaths": [{ - "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", - "value": "false" + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } }, { - "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.privileged", - "value": "false" - } - -], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "hello" + "alertMessage": "container :php in CronJob: hello allow privilege escalation", + "fixPaths": [ + { + "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", + "value": "false" + }, + { + "path": "spec.jobTemplate.spec.template.spec.containers[1].securityContext.privileged", + "value": "false" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "batch/v1beta1", + "kind": "CronJob", + "metadata": { + "name": "hello" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file diff --git a/rules/rule-allow-privilege-escalation/test/pod/expected.json b/rules/rule-allow-privilege-escalation/test/pod/expected.json index 9ecc18440..98f507449 100644 --- a/rules/rule-allow-privilege-escalation/test/pod/expected.json +++ 
b/rules/rule-allow-privilege-escalation/test/pod/expected.json @@ -1,21 +1,32 @@ -[{ - "alertMessage": "container: test-container in pod: audit-pod allow privilege escalation", - "reviewPaths": ["spec.containers[0].securityContext.allowPrivilegeEscalation"], - "failedPaths": ["spec.containers[0].securityContext.allowPrivilegeEscalation"], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "labels": { - "app": "audit-pod" - }, - "name": "audit-pod" +[ + { + "alertMessage": "container: test-container in pod: audit-pod allow privilege escalation", + "fixPaths": [ + { + "path": "spec.containers[0].securityContext.allowPrivilegeEscalation", + "value": "false" + }, + { + "path": "spec.containers[0].securityContext.privileged", + "value": "false" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "app": "audit-pod" + }, + "name": "audit-pod" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file diff --git a/rules/rule-allow-privilege-escalation/test/workloads/expected.json b/rules/rule-allow-privilege-escalation/test/workloads/expected.json index f4cdaff01..065b97d20 100644 --- a/rules/rule-allow-privilege-escalation/test/workloads/expected.json +++ b/rules/rule-allow-privilege-escalation/test/workloads/expected.json @@ -1,49 +1,62 @@ -[{ - "alertMessage": "container :mysql in Deployment: my-deployment allow privilege escalation", - "reviewPaths": ["spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation"], - "failedPaths": ["spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation"], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "app": "goproxy" - }, - "name": "my-deployment" +[ + { + "alertMessage": "container :mysql in Deployment: my-deployment allow privilege escalation", + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation", + "value": "false" + }, + { + "path": "spec.template.spec.containers[0].securityContext.privileged", + "value": "false" } - }] - } -}, { - "alertMessage": "container :php in Deployment: my-deployment allow privilege escalation", - "reviewPaths": [], - "failedPaths": [], - "fixPaths": [{ - "path": "spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", - "value": "false" - - }, - { - "path": "spec.template.spec.containers[1].securityContext.privileged", - "value": "false" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "app": "goproxy" - }, - "name": "my-deployment" + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "app": "goproxy" + }, + "name": "my-deployment" + } + } + ] + } + }, + { + "alertMessage": "container :php in Deployment: my-deployment allow privilege escalation", + "fixPaths": [ + { + "path": "spec.template.spec.containers[1].securityContext.allowPrivilegeEscalation", + 
"value": "false" + }, + { + "path": "spec.template.spec.containers[1].securityContext.privileged", + "value": "false" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "app": "goproxy" + }, + "name": "my-deployment" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file From 4d6f5d7d832f214c3ef7d1999be0f63f645c1c73 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 1 Apr 2024 11:48:11 +0300 Subject: [PATCH 155/195] linter fix Signed-off-by: YiscahLevySilas1 --- rules/rule-allow-privilege-escalation/raw.rego | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/rules/rule-allow-privilege-escalation/raw.rego b/rules/rule-allow-privilege-escalation/raw.rego index 4b2e6715e..47eab807b 100644 --- a/rules/rule-allow-privilege-escalation/raw.rego +++ b/rules/rule-allow-privilege-escalation/raw.rego @@ -97,7 +97,5 @@ is_allow_privilege_escalation_container(container) { not psp.spec.allowPrivilegeEscalation == false } -get_fix_path(i, start_of_path) = fixPath { - fixPath = [{"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, i]), "value":"false"}, +get_fix_path(i, start_of_path) = [{"path": sprintf("%vcontainers[%v].securityContext.allowPrivilegeEscalation", [start_of_path, i]), "value":"false"}, {"path": sprintf("%vcontainers[%v].securityContext.privileged", [start_of_path, i]), "value":"false"}] -} From 54d0c7d65d8c9f14fe263d889ad9dc37f4ab48fe Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 3 Apr 2024 08:48:01 +0300 Subject: [PATCH 156/195] anonymous auth disabled by default Signed-off-by: YiscahLevySilas1 --- .../raw.rego | 21 +------------- .../invalid-config-no-value/expected.json | 29 +------------------ .../test/no-cli-params/expected.json | 23 +-------------- .../test/valid-cli/input/kubelet-info.json | 2 +- 4 files changed, 4 insertions(+), 71 deletions(-) diff --git a/rules/anonymous-requests-to-kubelet-updated/raw.rego b/rules/anonymous-requests-to-kubelet-updated/raw.rego index 462306ce7..f09b64710 100644 --- a/rules/anonymous-requests-to-kubelet-updated/raw.rego +++ b/rules/anonymous-requests-to-kubelet-updated/raw.rego @@ -22,25 +22,6 @@ deny[msga] { } } -deny[msga] { - obj := input[_] - is_kubelet_info(obj) - command := obj.data.cmdLine - - not contains(command, "--anonymous-auth") - not contains(command, "--config") - - external_obj := json.filter(obj, ["apiVersion", "data/cmdLine", "kind", "metadata"]) - - msga := { - "alertMessage": "Anonymous requests is enabled.", - "alertScore": 7, - "failedPaths": [], - "fixPaths": [], - "packagename": "armo_builtins", - "alertObject": {"externalObjects": external_obj}, - } -} deny[msga] { obj := input[_] @@ -52,7 +33,7 @@ deny[msga] { decodedConfigContent := base64.decode(obj.data.configFile.content) yamlConfig := yaml.unmarshal(decodedConfigContent) - not yamlConfig.authentication.anonymous.enabled == false + yamlConfig.authentication.anonymous.enabled == true msga := { "alertMessage": "Anonymous requests is enabled.", diff --git a/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-no-value/expected.json b/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-no-value/expected.json index d89d4a6ed..0637a088a 100644 --- a/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-no-value/expected.json +++ 
b/rules/anonymous-requests-to-kubelet-updated/test/invalid-config-no-value/expected.json @@ -1,28 +1 @@ -[ - { - "alertMessage": "Anonymous requests is enabled.", - "alertObject": { - "externalObjects": { - "apiVersion": "hostdata.kubescape.cloud/v1beta0", - "data": { - "configFile": { - "content": "apiVersion: kubelet.config.k8s.io/v1beta1\nstreamingConnectionIdleTimeout: 0\neventRecordQPS: 0\nprotectKernelDefaults: false\nauthentication:\n webhook:\n cacheTTL: 0s\n enabled: true\n x509:\n clientCAFile: /var/lib/minikube/certs/ca.crt\nauthorization:\n mode: Webhook\n webhook:\n cacheAuthorizedTTL: 0s\n cacheUnauthorizedTTL: 0s" - } - }, - "kind": "KubeletInfo", - "metadata": { - "name": "" - } - } - }, - "alertScore": 7, - "reviewPaths": [ - "authentication.anonymous.enabled" - ], - "failedPaths": [ - "authentication.anonymous.enabled" - ], - "fixPaths": [], - "packagename": "armo_builtins" - } -] \ No newline at end of file +[] \ No newline at end of file diff --git a/rules/anonymous-requests-to-kubelet-updated/test/no-cli-params/expected.json b/rules/anonymous-requests-to-kubelet-updated/test/no-cli-params/expected.json index a1644b5b3..0637a088a 100644 --- a/rules/anonymous-requests-to-kubelet-updated/test/no-cli-params/expected.json +++ b/rules/anonymous-requests-to-kubelet-updated/test/no-cli-params/expected.json @@ -1,22 +1 @@ -[ - { - "alertMessage": "Anonymous requests is enabled.", - "alertObject": { - "externalObjects": { - "apiVersion": "hostdata.kubescape.cloud/v1beta0", - "data": { - "cmdLine": "/var/lib/minikube/binaries/v1.23.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf" - }, - "kind": "KubeletInfo", - "metadata": { - "name": "" - } - } - }, - "alertScore": 7, - "failedPaths": [], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins" - } -] \ No newline at end of file +[] \ No newline at end of file diff --git a/rules/anonymous-requests-to-kubelet-updated/test/valid-cli/input/kubelet-info.json b/rules/anonymous-requests-to-kubelet-updated/test/valid-cli/input/kubelet-info.json index 0dc5f80f7..760292ad6 100644 --- a/rules/anonymous-requests-to-kubelet-updated/test/valid-cli/input/kubelet-info.json +++ b/rules/anonymous-requests-to-kubelet-updated/test/valid-cli/input/kubelet-info.json @@ -2,7 +2,7 @@ "apiVersion": "hostdata.kubescape.cloud/v1beta0", "kind": "KubeletInfo", "data": { - "cmdLine": "/var/lib/minikube/binaries/v1.23.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --anonymous-auth=false --config=ss", + "cmdLine": "/var/lib/minikube/binaries/v1.23.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=ss", "configFile": { "content": "YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKc3RyZWFtaW5nQ29ubmVjdGlvbklkbGVUaW1lb3V0OiAwCmV2ZW50UmVjb3JkUVBTOiAwCnByb3RlY3RLZXJuZWxEZWZhdWx0czogZmFsc2UKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgICBlbmFibGVkOiB0cnVlCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL3Zhci9saWIvbWluaWt1YmUvY2VydHMvY2EuY3J0CmF1dGhvcml6YXRpb246CiAgbW9kZTogV2ViaG9vawogIHdlYmhvb2s6CiAgICBjYWNoZUF1dGhvcml6ZWRUVEw6IDBzCiAgICBjYWNoZVVuYXV0aG9yaXplZFRUTDogMHM=" } From d95600289c0bf31f8f366995fe35af0dd43dbf2b Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 3 Apr 2024 09:00:53 +0300 Subject: [PATCH 157/195] fix to valid remediation Signed-off-by: YiscahLevySilas1 --- rules/alert-any-hostpath/raw.rego | 31 ++++++++---- .../test/deployment/expected.json | 20 ++++---- 
.../test/deployment/input/deployment.yaml | 2 +- .../alert-any-hostpath/test/pod/expected.json | 48 ++++++++++--------- 4 files changed, 62 insertions(+), 39 deletions(-) diff --git a/rules/alert-any-hostpath/raw.rego b/rules/alert-any-hostpath/raw.rego index 581e43660..2bc5e3b19 100644 --- a/rules/alert-any-hostpath/raw.rego +++ b/rules/alert-any-hostpath/raw.rego @@ -9,14 +9,17 @@ deny[msga] { start_of_path := "spec." result := is_dangerous_volume(volume, start_of_path, i) podname := pod.metadata.name + volumeMounts := pod.spec.containers[j].volumeMounts + pathMounts = volume_mounts(volume.name, volumeMounts, sprintf("spec.containers[%v]", [j])) + finalPath := array.concat([result], pathMounts) msga := { "alertMessage": sprintf("pod: %v has: %v as hostPath volume", [podname, volume.name]), "packagename": "armo_builtins", "alertScore": 7, - "deletePaths": [result], - "failedPaths": [result], + "deletePaths": finalPath, + "failedPaths": finalPath, "fixPaths":[], "alertObject": { "k8sApiObjects": [pod] @@ -33,14 +36,17 @@ deny[msga] { volume := volumes[i] start_of_path := "spec.template.spec." result := is_dangerous_volume(volume, start_of_path, i) + volumeMounts := wl.spec.template.spec.containers[j].volumeMounts + pathMounts = volume_mounts(volume.name,volumeMounts, sprintf("spec.template.spec.containers[%v]", [j])) + finalPath := array.concat([result], pathMounts) msga := { "alertMessage": sprintf("%v: %v has: %v as hostPath volume", [wl.kind, wl.metadata.name, volume.name]), "packagename": "armo_builtins", "alertScore": 7, - "deletePaths": [result], - "failedPaths": [result], + "deletePaths": finalPath, + "failedPaths": finalPath, "fixPaths":[], "alertObject": { "k8sApiObjects": [wl] @@ -56,12 +62,16 @@ deny[msga] { volume := volumes[i] start_of_path := "spec.jobTemplate.spec.template.spec." 
result := is_dangerous_volume(volume, start_of_path, i) + volumeMounts := wl.spec.jobTemplate.spec.template.spec.containers[j].volumeMounts + pathMounts = volume_mounts(volume.name,volumeMounts, sprintf("spec.jobTemplate.spec.template.spec.containers[%v]", [j])) + finalPath := array.concat([result], pathMounts) + msga := { "alertMessage": sprintf("%v: %v has: %v as hostPath volume", [wl.kind, wl.metadata.name, volume.name]), "packagename": "armo_builtins", "alertScore": 7, - "deletePaths": [result], - "failedPaths": [result], + "deletePaths": finalPath, + "failedPaths": finalPath, "fixPaths":[], "alertObject": { "k8sApiObjects": [wl] @@ -71,5 +81,10 @@ deny[msga] { is_dangerous_volume(volume, start_of_path, i) = path { volume.hostPath.path - path = sprintf("%vvolumes[%v].hostPath.path", [start_of_path, format_int(i, 10)]) -} \ No newline at end of file + path = sprintf("%vvolumes[%v]", [start_of_path, format_int(i, 10)]) +} + +volume_mounts(name, volume_mounts, str) = [path] { + name == volume_mounts[j].name + path := sprintf("%s.volumeMounts[%v]", [str, j]) +} else = [] \ No newline at end of file diff --git a/rules/alert-any-hostpath/test/deployment/expected.json b/rules/alert-any-hostpath/test/deployment/expected.json index 7c9507a2d..4825bb3f9 100644 --- a/rules/alert-any-hostpath/test/deployment/expected.json +++ b/rules/alert-any-hostpath/test/deployment/expected.json @@ -1,11 +1,13 @@ [ { "alertMessage": "Deployment: my-deployment has: test-volume as hostPath volume", - "deletePaths": [ - "spec.template.spec.volumes[0].hostPath.path" - ], "failedPaths": [ - "spec.template.spec.volumes[0].hostPath.path" + "spec.template.spec.volumes[0]", + "spec.template.spec.containers[0].volumeMounts[0]" + ], + "deletePaths": [ + "spec.template.spec.volumes[0]", + "spec.template.spec.containers[0].volumeMounts[0]" ], "fixPaths": [], "ruleStatus": "", @@ -28,11 +30,13 @@ }, { "alertMessage": "Deployment: my-deployment has: test-volume2 as hostPath volume", - "deletePaths": [ - "spec.template.spec.volumes[1].hostPath.path" - ], "failedPaths": [ - "spec.template.spec.volumes[1].hostPath.path" + "spec.template.spec.volumes[1]", + "spec.template.spec.containers[0].volumeMounts[1]" + ], + "deletePaths": [ + "spec.template.spec.volumes[1]", + "spec.template.spec.containers[0].volumeMounts[1]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/alert-any-hostpath/test/deployment/input/deployment.yaml b/rules/alert-any-hostpath/test/deployment/input/deployment.yaml index c25ccfec4..5585f9402 100644 --- a/rules/alert-any-hostpath/test/deployment/input/deployment.yaml +++ b/rules/alert-any-hostpath/test/deployment/input/deployment.yaml @@ -23,7 +23,7 @@ spec: name : test-volume - mountPath : /test-pd2 - name : test-volume + name : test-volume2 volumes : - name : test-volume hostPath : diff --git a/rules/alert-any-hostpath/test/pod/expected.json b/rules/alert-any-hostpath/test/pod/expected.json index d93123451..d4c433aeb 100644 --- a/rules/alert-any-hostpath/test/pod/expected.json +++ b/rules/alert-any-hostpath/test/pod/expected.json @@ -1,24 +1,28 @@ -{ - "alertMessage": "pod: test-pd has: test-volume as hostPath volume", - "deletePaths": [ - "spec.volumes[0].hostPath.path" - ], - "failedPaths": [ - "spec.volumes[0].hostPath.path" - ], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "test-pd" +[ + { + "alertMessage": "pod: test-pd has: test-volume as 
hostPath volume", + "failedPaths": [ + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" + ], + "deletePaths": [ + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "test-pd" + } } - } - ] + ] + } } -} \ No newline at end of file +] \ No newline at end of file From 841a0447f6060b5727ddf58565319577d08099cd Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 3 Apr 2024 10:19:06 +0300 Subject: [PATCH 158/195] improve remediation - always return fixpath Signed-off-by: YiscahLevySilas1 --- rules/alert-rw-hostpath/raw.rego | 45 ++------- .../test/deployment/expected.json | 92 ++++++++++--------- 2 files changed, 59 insertions(+), 78 deletions(-) diff --git a/rules/alert-rw-hostpath/raw.rego b/rules/alert-rw-hostpath/raw.rego index 96195abb3..d3f989e09 100644 --- a/rules/alert-rw-hostpath/raw.rego +++ b/rules/alert-rw-hostpath/raw.rego @@ -12,9 +12,7 @@ deny[msga] { volume_mount := container.volumeMounts[k] volume_mount.name == volume.name start_of_path := "spec." - result := is_rw_mount(volume_mount, start_of_path, i, k) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + fix_path := is_rw_mount(volume_mount, start_of_path, i, k) podname := pod.metadata.name @@ -22,9 +20,7 @@ deny[msga] { "alertMessage": sprintf("pod: %v has: %v as hostPath volume", [podname, volume.name]), "packagename": "armo_builtins", "alertScore": 7, - "fixPaths": fixed_path, - "deletePaths": failed_path, - "failedPaths": failed_path, + "fixPaths": [fix_path], "alertObject": { "k8sApiObjects": [pod] } @@ -43,17 +39,13 @@ deny[msga] { volume_mount := container.volumeMounts[k] volume_mount.name == volume.name start_of_path := "spec.template.spec." - result := is_rw_mount(volume_mount, start_of_path, i, k) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + fix_path := is_rw_mount(volume_mount, start_of_path, i, k) msga := { "alertMessage": sprintf("%v: %v has: %v as hostPath volume", [wl.kind, wl.metadata.name, volume.name]), "packagename": "armo_builtins", "alertScore": 7, - "fixPaths": fixed_path, - "deletePaths": failed_path, - "failedPaths": failed_path, + "fixPaths": [fix_path], "alertObject": { "k8sApiObjects": [wl] } @@ -73,43 +65,22 @@ deny[msga] { volume_mount := container.volumeMounts[k] volume_mount.name == volume.name start_of_path := "spec.jobTemplate.spec.template.spec." 
- result := is_rw_mount(volume_mount, start_of_path, i, k) - failed_path := get_failed_path(result) - fixed_path := get_fixed_path(result) + fix_path := is_rw_mount(volume_mount, start_of_path, i, k) msga := { "alertMessage": sprintf("%v: %v has: %v as hostPath volume", [wl.kind, wl.metadata.name, volume.name]), "packagename": "armo_builtins", "alertScore": 7, - "fixPaths": fixed_path, - "deletePaths": failed_path, - "failedPaths": failed_path, + "fixPaths": [fix_path], "alertObject": { "k8sApiObjects": [wl] } } } -get_failed_path(paths) = [paths[0]] { - paths[0] != "" -} else = [] - -get_fixed_path(paths) = [paths[1]] { - paths[1] != "" -} else = [] - - -is_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] { +is_rw_mount(mount, start_of_path, i, k) = fix_path { not mount.readOnly == true - not mount.readOnly == false - failed_path = "" - fix_path = {"path": sprintf("%vcontainers[%v].volumeMounts[%v].readOnly", [start_of_path, format_int(i, 10), format_int(k, 10)]), "value":"true"} + fix_path = {"path": sprintf("%vcontainers[%v].volumeMounts[%v].readOnly", [start_of_path, i, k]), "value":"true"} } - -is_rw_mount(mount, start_of_path, i, k) = [failed_path, fix_path] { - mount.readOnly == false - failed_path = sprintf("%vcontainers[%v].volumeMounts[%v].readOnly", [start_of_path, format_int(i, 10), format_int(k, 10)]) - fix_path = "" -} \ No newline at end of file diff --git a/rules/alert-rw-hostpath/test/deployment/expected.json b/rules/alert-rw-hostpath/test/deployment/expected.json index 79fdc914c..f142fca21 100644 --- a/rules/alert-rw-hostpath/test/deployment/expected.json +++ b/rules/alert-rw-hostpath/test/deployment/expected.json @@ -1,44 +1,54 @@ -[{ - "alertMessage": "Deployment: my-deployment has: test-volume as hostPath volume", - "deletePaths": ["spec.template.spec.containers[0].volumeMounts[0].readOnly"], - "failedPaths": ["spec.template.spec.containers[0].volumeMounts[0].readOnly"], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "purpose": "demonstrate-command" - }, - "name": "my-deployment" +[ + { + "alertMessage": "Deployment: my-deployment has: test-volume as hostPath volume", + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].volumeMounts[0].readOnly", + "value": "true" } - }] - } -}, { - "alertMessage": "Deployment: my-deployment has: test-volume as hostPath volume", - "deletePaths": [], - "failedPaths": [], - "fixPaths": [{ - "path": "spec.template.spec.containers[0].volumeMounts[1].readOnly", - "value": "true" - }], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "labels": { - "purpose": "demonstrate-command" - }, - "name": "my-deployment" + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "my-deployment" + } + } + ] + } + }, + { + "alertMessage": "Deployment: my-deployment has: test-volume as hostPath volume", + "fixPaths": [ + { + "path": "spec.template.spec.containers[0].volumeMounts[1].readOnly", + "value": "true" } - }] + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + 
"apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "labels": { + "purpose": "demonstrate-command" + }, + "name": "my-deployment" + } + } + ] + } } -}] \ No newline at end of file +] \ No newline at end of file From 0cf5571692ce8fb13d014620b65fa90ea308c045 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Wed, 3 Apr 2024 10:56:01 +0300 Subject: [PATCH 159/195] fix to valid remediation Signed-off-by: YiscahLevySilas1 --- .../raw.rego | 41 +++++++++------- .../test/deployment_eks_failed/expected.json | 20 ++++---- .../input/deployment.yaml | 2 +- .../test/pod_eks_failed/expected.json | 48 ++++++++++--------- 4 files changed, 63 insertions(+), 48 deletions(-) diff --git a/rules/alert-mount-potential-credentials-paths/raw.rego b/rules/alert-mount-potential-credentials-paths/raw.rego index f28c15776..68e476863 100644 --- a/rules/alert-mount-potential-credentials-paths/raw.rego +++ b/rules/alert-mount-potential-credentials-paths/raw.rego @@ -6,18 +6,22 @@ deny[msga] { provider := data.dataControlInputs.cloudProvider provider != "" resources := input[_] - volumes_data := get_volumes(resources) - volumes := volumes_data["volumes"] + spec_data := get_pod_spec(resources) + spec := spec_data["spec"] + volumes := spec.volumes volume := volumes[i] - start_of_path := volumes_data["start_of_path"] - result := is_unsafe_paths(volume, start_of_path, provider,i) + start_of_path := spec_data["start_of_path"] + result := is_unsafe_paths(volume, start_of_path, provider, i) + volumeMounts := spec.containers[j].volumeMounts + pathMounts = volume_mounts(volume.name, volumeMounts, sprintf("%vcontainers[%d]", [start_of_path, j])) + finalPath := array.concat([result], pathMounts) msga := { "alertMessage": sprintf("%v: %v has: %v as volume with potential credentials access.", [resources.kind, resources.metadata.name, volume.name]), "packagename": "armo_builtins", "alertScore": 7, - "deletePaths": [result], - "failedPaths": [result], + "deletePaths": finalPath, + "failedPaths": finalPath, "fixPaths":[], "alertObject": { "k8sApiObjects": [resources] @@ -25,24 +29,23 @@ deny[msga] { } } - -# get_volume - get resource volumes paths for {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} -get_volumes(resources) := result { +# get_volume - get resource spec paths for {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} +get_pod_spec(resources) := result { resources_kinds := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"} resources_kinds[resources.kind] - result = {"volumes": resources.spec.template.spec.volumes, "start_of_path": "spec.template.spec."} + result = {"spec": resources.spec.template.spec, "start_of_path": "spec.template.spec."} } -# get_volume - get resource volumes paths for "Pod" -get_volumes(resources) := result { +# get_volume - get resource spec paths for "Pod" +get_pod_spec(resources) := result { resources.kind == "Pod" - result = {"volumes": resources.spec.volumes, "start_of_path": "spec."} + result = {"spec": resources.spec, "start_of_path": "spec."} } -# get_volume - get resource volumes paths for "CronJob" -get_volumes(resources) := result { +# get_volume - get resource spec paths for "CronJob" +get_pod_spec(resources) := result { resources.kind == "CronJob" - result = {"volumes": resources.spec.jobTemplate.spec.template.spec.volumes, "start_of_path": "spec.jobTemplate.spec.template.spec."} + result = {"spec": resources.spec.jobTemplate.spec.template.spec, "start_of_path": "spec.jobTemplate.spec.template.spec."} } @@ -50,7 +53,7 @@ get_volumes(resources) 
:= result { is_unsafe_paths(volume, start_of_path, provider, i) = result { unsafe := unsafe_paths(provider) unsafe[_] == fix_path(volume.hostPath.path) - result= sprintf("%vvolumes[%d].hostPath.path", [start_of_path, i]) + result = sprintf("%vvolumes[%d]", [start_of_path, i]) } @@ -89,3 +92,7 @@ unsafe_paths(x) := ["/.config/gcloud/", "/.config/gcloud/application_default_credentials.json", "/gcloud/application_default_credentials.json"] if {x=="gke"} +volume_mounts(name, volume_mounts, str) = [path] { + name == volume_mounts[j].name + path := sprintf("%s.volumeMounts[%v]", [str, j]) +} else = [] \ No newline at end of file diff --git a/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/expected.json b/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/expected.json index 98f764d96..4b25ada8c 100644 --- a/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/expected.json +++ b/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/expected.json @@ -1,11 +1,13 @@ [ { "alertMessage": "Deployment: my-deployment has: test-volume as volume with potential credentials access.", - "deletePaths": [ - "spec.template.spec.volumes[0].hostPath.path" - ], "failedPaths": [ - "spec.template.spec.volumes[0].hostPath.path" + "spec.template.spec.volumes[0]", + "spec.template.spec.containers[0].volumeMounts[0]" + ], + "deletePaths": [ + "spec.template.spec.volumes[0]", + "spec.template.spec.containers[0].volumeMounts[0]" ], "fixPaths": [], "ruleStatus": "", @@ -28,11 +30,13 @@ }, { "alertMessage": "Deployment: my-deployment has: test-volume2 as volume with potential credentials access.", - "deletePaths": [ - "spec.template.spec.volumes[1].hostPath.path" - ], "failedPaths": [ - "spec.template.spec.volumes[1].hostPath.path" + "spec.template.spec.volumes[1]", + "spec.template.spec.containers[0].volumeMounts[1]" + ], + "deletePaths": [ + "spec.template.spec.volumes[1]", + "spec.template.spec.containers[0].volumeMounts[1]" ], "fixPaths": [], "ruleStatus": "", diff --git a/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/input/deployment.yaml b/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/input/deployment.yaml index cf95f620b..aa07d6fa6 100644 --- a/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/input/deployment.yaml +++ b/rules/alert-mount-potential-credentials-paths/test/deployment_eks_failed/input/deployment.yaml @@ -23,7 +23,7 @@ spec: name : test-volume - mountPath : /test-pd2 - name : test-volume + name : test-volume2 volumes : - name : test-volume hostPath : diff --git a/rules/alert-mount-potential-credentials-paths/test/pod_eks_failed/expected.json b/rules/alert-mount-potential-credentials-paths/test/pod_eks_failed/expected.json index 4636386ac..00f0f7995 100644 --- a/rules/alert-mount-potential-credentials-paths/test/pod_eks_failed/expected.json +++ b/rules/alert-mount-potential-credentials-paths/test/pod_eks_failed/expected.json @@ -1,24 +1,28 @@ -{ - "alertMessage": "Pod: test-pd has: test-volume as volume with potential credentials access.", - "deletePaths": [ - "spec.volumes[0].hostPath.path" - ], - "failedPaths": [ - "spec.volumes[0].hostPath.path" - ], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "test-pd" +[ + { + "alertMessage": "Pod: test-pd has: test-volume as volume with potential 
credentials access.", + "failedPaths": [ + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" + ], + "deletePaths": [ + "spec.volumes[0]", + "spec.containers[0].volumeMounts[0]" + ], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "test-pd" + } } - } - ] + ] + } } -} \ No newline at end of file +] \ No newline at end of file From 79e06ab43a16f9ee81369d4459326e5bac6c1d71 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 14 Apr 2024 13:30:26 +0300 Subject: [PATCH 160/195] add control to security fw, add test case Signed-off-by: YiscahLevySilas1 --- .../C-0266-exposuretointernet-gateway.json | 4 +- frameworks/security.json | 6 ++ .../failed_with_httproute-istio/expected.json | 21 +++++ .../input/deployment.yaml | 79 +++++++++++++++++++ .../input/httproute.yaml | 71 +++++++++++++++++ .../input/service.yaml | 33 ++++++++ 6 files changed, 212 insertions(+), 2 deletions(-) create mode 100644 rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/expected.json create mode 100644 rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/deployment.yaml create mode 100644 rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/httproute.yaml create mode 100644 rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/service.yaml diff --git a/controls/C-0266-exposuretointernet-gateway.json b/controls/C-0266-exposuretointernet-gateway.json index 3eb0665fe..b97dc336c 100644 --- a/controls/C-0266-exposuretointernet-gateway.json +++ b/controls/C-0266-exposuretointernet-gateway.json @@ -19,10 +19,10 @@ } ] }, - "description": "This control detect workloads that are exposed on Internet through a Gateway API (HTTPRoute,TCPRoute, UDPRoute). It fails in case it find workloads connected with these resources.", + "description": "This control detect workloads that are exposed on Internet through a Gateway API (using an HTTPRoute). 
It fails in case it find workloads connected with these resources.", "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", "rulesNames": ["exposure-to-internet-via-gateway-api"], - "test": "Checks if workloads are exposed through the use of Gateway API (HTTPRoute, TCPRoute, UDPRoute).", + "test": "Checks if workloads are exposed through the use of Gateway API (using an HTTPRoute).", "controlID": "C-0266", "baseScore": 7.0, "scanningScope": { diff --git a/frameworks/security.json b/frameworks/security.json index 42010a264..9e2846b61 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -127,6 +127,12 @@ "patch": { "name": "Host PID/IPC privileges" } + }, + { + "controlID": "C-0266", + "patch": { + "name": "Exposure to internet via Gateway API" + } } ] } diff --git a/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/expected.json b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/expected.json new file mode 100644 index 000000000..c00e5ec76 --- /dev/null +++ b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/expected.json @@ -0,0 +1,21 @@ +[ + { + "alertMessage": "workload 'httpbin' is exposed through httproute 'http'", + "failedPaths": [], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "httpbin" + } + } + ] + } + } +] \ No newline at end of file diff --git a/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/deployment.yaml b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/deployment.yaml new file mode 100644 index 000000000..98b05fcf7 --- /dev/null +++ b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/deployment.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"name":"httpbin","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"httpbin","version":"v1"}},"template":{"metadata":{"labels":{"app":"httpbin","version":"v1"}},"spec":{"containers":[{"command":["gunicorn","-b","0.0.0.0:8080","httpbin:app","-k","gevent"],"env":[{"name":"WORKON_HOME","value":"/tmp"}],"image":"docker.io/kong/httpbin","imagePullPolicy":"IfNotPresent","name":"httpbin","ports":[{"containerPort":8080}]}],"serviceAccountName":"httpbin"}}}} + creationTimestamp: "2024-04-14T07:39:35Z" + generation: 1 + name: httpbin + namespace: default + resourceVersion: "2376" + uid: d5e57f81-0001-4454-9623-c3d8bb429c90 +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: httpbin + version: v1 + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: httpbin + version: v1 + spec: + containers: + - command: + - gunicorn + - -b + - 0.0.0.0:8080 + - httpbin:app + - -k + - gevent + env: + - name: WORKON_HOME + value: /tmp + image: docker.io/kong/httpbin + imagePullPolicy: IfNotPresent + name: httpbin + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + 
dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: httpbin + serviceAccountName: httpbin + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2024-04-14T07:39:48Z" + lastUpdateTime: "2024-04-14T07:39:48Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2024-04-14T07:39:35Z" + lastUpdateTime: "2024-04-14T07:39:48Z" + message: ReplicaSet "httpbin-54b5c865df" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 \ No newline at end of file diff --git a/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/httproute.yaml b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/httproute.yaml new file mode 100644 index 000000000..56ae5046c --- /dev/null +++ b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/httproute.yaml @@ -0,0 +1,71 @@ +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"gateway.networking.k8s.io/v1beta1","kind":"HTTPRoute","metadata":{"annotations":{},"name":"http","namespace":"default"},"spec":{"hostnames":["httpbin.example.com"],"parentRefs":[{"name":"gateway","namespace":"istio-ingress"}],"rules":[{"backendRefs":[{"name":"httpbin","port":8000}],"matches":[{"path":{"type":"PathPrefix","value":"/get"}}]}]}} + creationTimestamp: "2024-04-14T07:41:31Z" + generation: 1 + name: http + namespace: default + resourceVersion: "2647" + uid: b7c1d09f-0cf8-4fc6-ada8-ec415b463038 +spec: + hostnames: + - httpbin.example.com + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: gateway + namespace: istio-ingress + rules: + - backendRefs: + - group: "" + kind: Service + name: httpbin + port: 8000 + weight: 1 + matches: + - path: + type: PathPrefix + value: /get +status: + parents: + - conditions: + - lastTransitionTime: "2024-04-14T07:41:38Z" + message: "" + observedGeneration: 1 + reason: Accepted + status: "True" + type: Accepted + - lastTransitionTime: "2024-04-14T07:41:38Z" + message: "" + observedGeneration: 1 + reason: ResolvedRefs + status: "True" + type: ResolvedRefs + controllerName: solo.io/gloo-gateway + parentRef: + group: gateway.networking.k8s.io + kind: Gateway + name: gateway + namespace: istio-ingress + - conditions: + - lastTransitionTime: "2024-04-14T07:41:38Z" + message: Route was valid + observedGeneration: 1 + reason: Accepted + status: "True" + type: Accepted + - lastTransitionTime: "2024-04-14T07:41:38Z" + message: All references resolved + observedGeneration: 1 + reason: ResolvedRefs + status: "True" + type: ResolvedRefs + controllerName: istio.io/gateway-controller + parentRef: + group: gateway.networking.k8s.io + kind: Gateway + name: gateway + namespace: istio-ingress diff --git a/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/service.yaml b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/service.yaml new file mode 100644 index 000000000..0177f6752 --- /dev/null +++ b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/input/service.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + 
kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"app":"httpbin","service":"httpbin"},"name":"httpbin","namespace":"default"},"spec":{"ports":[{"name":"http","port":8000,"targetPort":8080}],"selector":{"app":"httpbin"}}} + creationTimestamp: "2024-04-14T07:39:35Z" + labels: + app: httpbin + service: httpbin + name: httpbin + namespace: default + resourceVersion: "2328" + uid: 5b675069-a387-4fa4-83b6-8fd25462f714 +spec: + clusterIP: 10.96.126.137 + clusterIPs: + - 10.96.126.137 + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: http + port: 8000 + protocol: TCP + targetPort: 8080 + selector: + app: httpbin + sessionAffinity: None + type: ClusterIP +status: + loadBalancer: {} \ No newline at end of file From 0fee4ec397cda6566e193b2e3dba4246a0f4c704 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 14 Apr 2024 13:43:30 +0300 Subject: [PATCH 161/195] update expected Signed-off-by: YiscahLevySilas1 --- .../failed_with_httproute-istio/expected.json | 166 +++++++++++++++- .../test/failed_with_httproute/expected.json | 180 ++++++++++++++++-- 2 files changed, 325 insertions(+), 21 deletions(-) diff --git a/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/expected.json b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/expected.json index c00e5ec76..defc283a5 100644 --- a/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/expected.json +++ b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute-istio/expected.json @@ -16,6 +16,170 @@ } } ] - } + }, + "relatedObjects": [ + { + "object": { + "apiVersion": "gateway.networking.k8s.io/v1", + "kind": "HTTPRoute", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"gateway.networking.k8s.io/v1beta1\",\"kind\":\"HTTPRoute\",\"metadata\":{\"annotations\":{},\"name\":\"http\",\"namespace\":\"default\"},\"spec\":{\"hostnames\":[\"httpbin.example.com\"],\"parentRefs\":[{\"name\":\"gateway\",\"namespace\":\"istio-ingress\"}],\"rules\":[{\"backendRefs\":[{\"name\":\"httpbin\",\"port\":8000}],\"matches\":[{\"path\":{\"type\":\"PathPrefix\",\"value\":\"/get\"}}]}]}}\n" + }, + "creationTimestamp": "2024-04-14T07:41:31Z", + "generation": 1, + "name": "http", + "namespace": "default", + "resourceVersion": "2647", + "uid": "b7c1d09f-0cf8-4fc6-ada8-ec415b463038" + }, + "spec": { + "hostnames": [ + "httpbin.example.com" + ], + "parentRefs": [ + { + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": "gateway", + "namespace": "istio-ingress" + } + ], + "rules": [ + { + "backendRefs": [ + { + "group": "", + "kind": "Service", + "name": "httpbin", + "port": 8000, + "weight": 1 + } + ], + "matches": [ + { + "path": { + "type": "PathPrefix", + "value": "/get" + } + } + ] + } + ] + }, + "status": { + "parents": [ + { + "conditions": [ + { + "lastTransitionTime": "2024-04-14T07:41:38Z", + "message": "", + "observedGeneration": 1, + "reason": "Accepted", + "status": "True", + "type": "Accepted" + }, + { + "lastTransitionTime": "2024-04-14T07:41:38Z", + "message": "", + "observedGeneration": 1, + "reason": "ResolvedRefs", + "status": "True", + "type": "ResolvedRefs" + } + ], + "controllerName": "solo.io/gloo-gateway", + "parentRef": { + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": "gateway", + "namespace": "istio-ingress" + } + }, + { + "conditions": 
[ + { + "lastTransitionTime": "2024-04-14T07:41:38Z", + "message": "Route was valid", + "observedGeneration": 1, + "reason": "Accepted", + "status": "True", + "type": "Accepted" + }, + { + "lastTransitionTime": "2024-04-14T07:41:38Z", + "message": "All references resolved", + "observedGeneration": 1, + "reason": "ResolvedRefs", + "status": "True", + "type": "ResolvedRefs" + } + ], + "controllerName": "istio.io/gateway-controller", + "parentRef": { + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": "gateway", + "namespace": "istio-ingress" + } + } + ] + } + }, + "failedPaths": [ + "spec.rules[0].backendRefs[0].name" + ], + "reviewPaths": [ + "spec.rules[0].backendRefs[0].name" + ] + }, + { + "object": { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"httpbin\",\"service\":\"httpbin\"},\"name\":\"httpbin\",\"namespace\":\"default\"},\"spec\":{\"ports\":[{\"name\":\"http\",\"port\":8000,\"targetPort\":8080}],\"selector\":{\"app\":\"httpbin\"}}}\n" + }, + "creationTimestamp": "2024-04-14T07:39:35Z", + "labels": { + "app": "httpbin", + "service": "httpbin" + }, + "name": "httpbin", + "namespace": "default", + "resourceVersion": "2328", + "uid": "5b675069-a387-4fa4-83b6-8fd25462f714" + }, + "spec": { + "clusterIP": "10.96.126.137", + "clusterIPs": [ + "10.96.126.137" + ], + "internalTrafficPolicy": "Cluster", + "ipFamilies": [ + "IPv4" + ], + "ipFamilyPolicy": "SingleStack", + "ports": [ + { + "name": "http", + "port": 8000, + "protocol": "TCP", + "targetPort": 8080 + } + ], + "selector": { + "app": "httpbin" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } + } + } + ] } ] \ No newline at end of file diff --git a/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/expected.json b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/expected.json index 84bc8246a..1c52ffa5d 100644 --- a/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/expected.json +++ b/rules/exposure-to-internet-via-gateway-api/test/failed_with_httproute/expected.json @@ -1,21 +1,161 @@ [ - { - "alertMessage": "workload 'httpbin' is exposed through httproute 'httpbin'", - "failedPaths": [], - "fixPaths": [], - "ruleStatus": "", - "packagename": "armo_builtins", - "alertScore": 7, - "alertObject": { - "k8sApiObjects": [ - { - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "name": "httpbin" - } - } - ] - } - } - ] \ No newline at end of file + { + "alertMessage": "workload 'httpbin' is exposed through httproute 'httpbin'", + "failedPaths": [], + "fixPaths": [], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "httpbin" + } + } + ] + }, + "relatedObjects": [ + { + "object": { + "apiVersion": "gateway.networking.k8s.io/v1", + "kind": "HTTPRoute", + "metadata": { + "creationTimestamp": "2024-02-04T19:06:03Z", + "generation": 1, + "labels": { + "example": "httpbin-route" + }, + "name": "httpbin", + "namespace": "httpbin", + "resourceVersion": "914", + "uid": "fd820080-801d-4fa7-934a-e23abe8bf746" + }, + "spec": { + "hostnames": [ + "www.example.com" + ], + "parentRefs": [ + { + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": "http", + "namespace": 
"gloo-system" + } + ], + "rules": [ + { + "backendRefs": [ + { + "group": "", + "kind": "Service", + "name": "httpbin", + "port": 8000, + "weight": 1 + } + ], + "matches": [ + { + "path": { + "type": "PathPrefix", + "value": "/" + } + } + ] + } + ] + }, + "status": { + "parents": [ + { + "conditions": [ + { + "lastTransitionTime": "2024-02-04T19:06:03Z", + "message": "", + "observedGeneration": 1, + "reason": "Accepted", + "status": "True", + "type": "Accepted" + }, + { + "lastTransitionTime": "2024-02-04T19:06:03Z", + "message": "", + "observedGeneration": 1, + "reason": "ResolvedRefs", + "status": "True", + "type": "ResolvedRefs" + } + ], + "controllerName": "solo.io/gloo-gateway", + "parentRef": { + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": "http", + "namespace": "gloo-system" + } + } + ] + } + }, + "failedPaths": [ + "spec.rules[0].backendRefs[0].name" + ], + "reviewPaths": [ + "spec.rules[0].backendRefs[0].name" + ] + }, + { + "object": { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "creationTimestamp": "2024-02-04T19:05:12Z", + "labels": { + "app": "httpbin", + "service": "httpbin" + }, + "name": "httpbin", + "namespace": "httpbin", + "resourceVersion": "811", + "uid": "c391feb7-54e5-41b2-869b-33166869f1b7" + }, + "spec": { + "clusterIP": "10.96.162.234", + "clusterIPs": [ + "10.96.162.234" + ], + "internalTrafficPolicy": "Cluster", + "ipFamilies": [ + "IPv4" + ], + "ipFamilyPolicy": "SingleStack", + "ports": [ + { + "name": "http", + "port": 8000, + "protocol": "TCP", + "targetPort": 8080 + }, + { + "name": "tcp", + "port": 9000, + "protocol": "TCP", + "targetPort": 9000 + } + ], + "selector": { + "app": "httpbin" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } + } + } + ] + } +] \ No newline at end of file From 6f60b062387e294ed9daf5ebecaeb50d36611fbd Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 14 Apr 2024 13:51:51 +0300 Subject: [PATCH 162/195] rm armoBuiltin attribute Signed-off-by: YiscahLevySilas1 --- frameworks/__YAMLscan.json | 4 +--- frameworks/allcontrols.json | 4 +--- frameworks/armobest.json | 4 +--- frameworks/cis-aks-t1.2.0.json | 1 - frameworks/cis-eks-t1.2.0.json | 1 - frameworks/cis-v1.23-t1.0.1.json | 1 - frameworks/clusterscan.json | 4 +--- frameworks/devopsbest.json | 4 +--- frameworks/mitre.json | 4 +--- frameworks/nsaframework.json | 4 +--- frameworks/security.json | 4 +--- frameworks/soc2.json | 4 +--- frameworks/workloadscan.json | 4 +--- 13 files changed, 10 insertions(+), 33 deletions(-) diff --git a/frameworks/__YAMLscan.json b/frameworks/__YAMLscan.json index a88bca10c..31896edd4 100644 --- a/frameworks/__YAMLscan.json +++ b/frameworks/__YAMLscan.json @@ -1,9 +1,7 @@ { "name": "YAML-scanning", "description": "Controls relevant to yamls", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "scanningScope": { "matches": [ "file" diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index 173558c36..453f81327 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -1,9 +1,7 @@ { "name": "AllControls", "description": "Contains all the controls from all the frameworks", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/armobest.json b/frameworks/armobest.json index 771d04bf1..83e0d58be 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -1,9 +1,7 @@ { "name": "ArmoBest", "description": "", - 
"attributes": { - "armoBuiltin": true - }, + "attributes": {}, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/cis-aks-t1.2.0.json b/frameworks/cis-aks-t1.2.0.json index 9fc2300bf..f0a07ad97 100644 --- a/frameworks/cis-aks-t1.2.0.json +++ b/frameworks/cis-aks-t1.2.0.json @@ -2,7 +2,6 @@ "name": "cis-aks-t1.2.0", "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", "attributes": { - "armoBuiltin": true, "version": "v1.2.0" }, "scanningScope": { diff --git a/frameworks/cis-eks-t1.2.0.json b/frameworks/cis-eks-t1.2.0.json index f9e23806a..a1c49b3f2 100644 --- a/frameworks/cis-eks-t1.2.0.json +++ b/frameworks/cis-eks-t1.2.0.json @@ -2,7 +2,6 @@ "name": "cis-eks-t1.2.0", "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", "attributes": { - "armoBuiltin": true, "version": "v1.2.0" }, "scanningScope": { diff --git a/frameworks/cis-v1.23-t1.0.1.json b/frameworks/cis-v1.23-t1.0.1.json index a12e7c277..0c6f21fe9 100644 --- a/frameworks/cis-v1.23-t1.0.1.json +++ b/frameworks/cis-v1.23-t1.0.1.json @@ -2,7 +2,6 @@ "name": "cis-v1.23-t1.0.1", "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", "attributes": { - "armoBuiltin": true, "version": "v1.0.1" }, "scanningScope": { diff --git a/frameworks/clusterscan.json b/frameworks/clusterscan.json index 97353bb0d..26ae0f0b8 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -1,9 +1,7 @@ { "name": "ClusterScan", "description": "Framework for scanning a cluster", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "typeTags": [ "security" ], diff --git a/frameworks/devopsbest.json b/frameworks/devopsbest.json index d01a4b347..299fbafe0 100644 --- a/frameworks/devopsbest.json +++ b/frameworks/devopsbest.json @@ -1,9 +1,7 @@ { "name": "DevOpsBest", "description": "", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/mitre.json b/frameworks/mitre.json index 510e25e75..8a5102ee1 100644 --- a/frameworks/mitre.json +++ b/frameworks/mitre.json @@ -1,9 +1,7 @@ { "name": "MITRE", "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/nsaframework.json b/frameworks/nsaframework.json index 71c731fd0..fd8ddf666 100644 --- a/frameworks/nsaframework.json +++ b/frameworks/nsaframework.json @@ -1,9 +1,7 @@ { "name": "NSA", "description": "Implement NSA security advices for K8s ", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/security.json b/frameworks/security.json index e02f408f2..320a514df 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -1,9 +1,7 @@ { "name": "security", "description": "Controls that are used to assess security threats.", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "typeTags": [ "security" ], diff --git a/frameworks/soc2.json b/frameworks/soc2.json index 03aa66125..822e31841 100644 --- a/frameworks/soc2.json +++ b/frameworks/soc2.json @@ -1,9 +1,7 @@ { "name": "SOC2", "description": "SOC2 compliance 
related controls", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/workloadscan.json b/frameworks/workloadscan.json index f1f8a868c..85f3a71e0 100644 --- a/frameworks/workloadscan.json +++ b/frameworks/workloadscan.json @@ -1,9 +1,7 @@ { "name": "WorkloadScan", "description": "Framework for scanning a workload", - "attributes": { - "armoBuiltin": true - }, + "attributes": {}, "typeTags": [ "security" ], From 35ec9c1cb7130a44000012a0353c4aa9df608bfd Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Sun, 14 Apr 2024 14:33:34 +0300 Subject: [PATCH 163/195] expand rule to check TCPRoute, UDPRoute Signed-off-by: YiscahLevySilas1 --- controls/C-0266-exposuretointernet-gateway.json | 4 ++-- rules/exposure-to-internet-via-gateway-api/raw.rego | 3 ++- .../exposure-to-internet-via-gateway-api/rule.metadata.json | 6 ++++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/controls/C-0266-exposuretointernet-gateway.json b/controls/C-0266-exposuretointernet-gateway.json index b97dc336c..ffce0fc0b 100644 --- a/controls/C-0266-exposuretointernet-gateway.json +++ b/controls/C-0266-exposuretointernet-gateway.json @@ -19,10 +19,10 @@ } ] }, - "description": "This control detect workloads that are exposed on Internet through a Gateway API (using an HTTPRoute). It fails in case it find workloads connected with these resources.", + "description": "This control detect workloads that are exposed on Internet through a Gateway API (HTTPRoute,TCPRoute, UDPRoute). It fails in case it find workloads connected with these resources.", "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", "rulesNames": ["exposure-to-internet-via-gateway-api"], - "test": "Checks if workloads are exposed through the use of Gateway API (using an HTTPRoute).", + "test": "Checks if workloads are exposed through the use of Gateway API (HTTPRoute,TCPRoute, UDPRoute).", "controlID": "C-0266", "baseScore": 7.0, "scanningScope": { diff --git a/rules/exposure-to-internet-via-gateway-api/raw.rego b/rules/exposure-to-internet-via-gateway-api/raw.rego index a6173c363..0fac5da2d 100644 --- a/rules/exposure-to-internet-via-gateway-api/raw.rego +++ b/rules/exposure-to-internet-via-gateway-api/raw.rego @@ -1,9 +1,10 @@ package armo_builtins +import future.keywords.in deny[msga] { httproute := input[_] - httproute.kind == "HTTPRoute" + httproute.kind in ["HTTPRoute", "TCPRoute", "UDPRoute"] svc := input[_] svc.kind == "Service" diff --git a/rules/exposure-to-internet-via-gateway-api/rule.metadata.json b/rules/exposure-to-internet-via-gateway-api/rule.metadata.json index b2d1a4818..51dda9e8a 100644 --- a/rules/exposure-to-internet-via-gateway-api/rule.metadata.json +++ b/rules/exposure-to-internet-via-gateway-api/rule.metadata.json @@ -47,10 +47,12 @@ "gateway.networking.k8s.io" ], "apiVersions": [ - "v1" + "v1", "v1alpha2" ], "resources": [ - "HTTPRoute" + "HTTPRoute", + "TCPRoute", + "UDPRoute" ] } ], From 9668605dee7b895904bf3700110ce80488cff2e5 Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Fri, 19 Apr 2024 12:42:53 +0300 Subject: [PATCH 164/195] re-add attack track external-workload-with-cluster-takeover-roles Signed-off-by: YiscahLevySilas1 --- ...-workload-with-cluster-takeover-roles.json | 20 +++++++++++++++++++ controls/C-0256-exposuretointernet.json | 6 ++++++ .../C-0266-exposuretointernet-gateway.json | 6 ++++++ ...0267-workloadwithclustertakeoverroles.json | 17 ++++++++++++---- 4 files 
changed, 45 insertions(+), 4 deletions(-) create mode 100644 attack-tracks/external-workload-with-cluster-takeover-roles.json diff --git a/attack-tracks/external-workload-with-cluster-takeover-roles.json b/attack-tracks/external-workload-with-cluster-takeover-roles.json new file mode 100644 index 000000000..d12d0a139 --- /dev/null +++ b/attack-tracks/external-workload-with-cluster-takeover-roles.json @@ -0,0 +1,20 @@ +{ + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "external-workload-with-cluster-takeover-roles" + }, + "spec": { + "version": "1.0", + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Cluster Access", + "description": "An attacker has access to sensitive information and can leverage them by creating pods in the cluster." + } + ] + } + } +} \ No newline at end of file diff --git a/controls/C-0256-exposuretointernet.json b/controls/C-0256-exposuretointernet.json index 8c9776554..0abb72f5b 100644 --- a/controls/C-0256-exposuretointernet.json +++ b/controls/C-0256-exposuretointernet.json @@ -17,6 +17,12 @@ "Initial Access" ] }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, { "attackTrack": "external-database-without-authentication", "categories": [ diff --git a/controls/C-0266-exposuretointernet-gateway.json b/controls/C-0266-exposuretointernet-gateway.json index ffce0fc0b..776d1cef7 100644 --- a/controls/C-0266-exposuretointernet-gateway.json +++ b/controls/C-0266-exposuretointernet-gateway.json @@ -16,6 +16,12 @@ "categories": [ "Initial Access" ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] } ] }, diff --git a/controls/C-0267-workloadwithclustertakeoverroles.json b/controls/C-0267-workloadwithclustertakeoverroles.json index 708016d0c..8ab37b83d 100644 --- a/controls/C-0267-workloadwithclustertakeoverroles.json +++ b/controls/C-0267-workloadwithclustertakeoverroles.json @@ -4,7 +4,16 @@ "controlTypeTags": [ "security" ], - "attackTracks": [] + "attackTracks": [ + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Cluster Access" + ], + "displayRelatedResources": true, + "clickableResourceKind": "ServiceAccount" + } + ] }, "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", "remediation": "You should apply least privilege principle. 
Make sure each service account has only the permissions that are absolutely necessary.", @@ -16,12 +25,12 @@ "controlID": "C-0267", "baseScore": 6.0, "category": { - "name" : "Workload" - }, + "name": "Workload" + }, "scanningScope": { "matches": [ "cluster", "file" ] } -} +} \ No newline at end of file From a97dbf95163759ba99dc15f28be41cf6b94f6e8d Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Fri, 19 Apr 2024 12:53:32 +0300 Subject: [PATCH 165/195] update k8s supported versions Signed-off-by: YiscahLevySilas1 --- rules/outdated-k8s-version/raw.rego | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/outdated-k8s-version/raw.rego b/rules/outdated-k8s-version/raw.rego index f352d0fd3..f592d87ce 100644 --- a/rules/outdated-k8s-version/raw.rego +++ b/rules/outdated-k8s-version/raw.rego @@ -18,7 +18,7 @@ deny[msga] { has_outdated_version(version) { # the `supported_k8s_versions` is validated in the validations script against "https://api.github.com/repos/kubernetes/kubernetes/releases" - supported_k8s_versions := ["v1.29", "v1.28", "v1.27"] + supported_k8s_versions := ["v1.30", "v1.29", "v1.28"] every v in supported_k8s_versions{ not startswith(version, v) } From 3308ca6027c9109a07a9cee41398c78695d95cc4 Mon Sep 17 00:00:00 2001 From: kooomix Date: Sun, 21 Apr 2024 11:53:52 +0300 Subject: [PATCH 166/195] Add data in rest encryption control (C-0264) to security.json Signed-off-by: kooomix --- frameworks/security.json | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/frameworks/security.json b/frameworks/security.json index b222a4672..7ee901f62 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -176,6 +176,12 @@ "name": "Anonymous access enabled" } }, + { + "controlID": "C-0264", + "patch": { + "name": "Data in rest encryption - Persistent Volumes are encrypted (CC1.1,CC6.7)" + } + }, { "controlID": "C-0265", "patch": { From 24089bc1152f44ac5796f5838e0779ab536c18da Mon Sep 17 00:00:00 2001 From: kooomix Date: Sun, 21 Apr 2024 11:59:46 +0300 Subject: [PATCH 167/195] Add controlTypeTags to C-0262-anonymousaccessisenabled.json and update control name in security.json Signed-off-by: kooomix --- controls/C-0262-anonymousaccessisenabled.json | 4 ++++ frameworks/security.json | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/controls/C-0262-anonymousaccessisenabled.json b/controls/C-0262-anonymousaccessisenabled.json index c82021bd9..1479170b2 100644 --- a/controls/C-0262-anonymousaccessisenabled.json +++ b/controls/C-0262-anonymousaccessisenabled.json @@ -5,6 +5,10 @@ "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. 
Also checks in the apiserver if the --anonymous-auth flag is set to false", "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] }, "rulesNames": [ "anonymous-access-enabled" diff --git a/frameworks/security.json b/frameworks/security.json index 7ee901f62..4f737e29a 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -179,7 +179,7 @@ { "controlID": "C-0264", "patch": { - "name": "Data in rest encryption - Persistent Volumes are encrypted (CC1.1,CC6.7)" + "name": "PersistentVolume without encyption" } }, { From 5d205cbb35840641dd03b42d37cba1b0ec9735ad Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Mon, 6 May 2024 12:01:24 +0300 Subject: [PATCH 168/195] add func GetControlFrameworkSubsections Signed-off-by: YiscahLevySilas1 --- gitregostore/gitstoremethods.go | 50 ++++++++++++++++++++++++++++ gitregostore/gitstoremethods_test.go | 22 ++++++++++++ 2 files changed, 72 insertions(+) diff --git a/gitregostore/gitstoremethods.go b/gitregostore/gitstoremethods.go index 33ae79512..eb44a6971 100644 --- a/gitregostore/gitstoremethods.go +++ b/gitregostore/gitstoremethods.go @@ -7,6 +7,7 @@ import ( "github.com/armosec/armoapi-go/armotypes" "github.com/go-gota/gota/dataframe" "github.com/go-gota/gota/series" + "github.com/kubescape/opa-utils/reporthandling" opapolicy "github.com/kubescape/opa-utils/reporthandling" "github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1" "k8s.io/utils/strings/slices" @@ -301,6 +302,55 @@ func (gs *GitRegoStore) GetOpaFrameworkListByControlID(controlID string) []strin return frameworksNameList } +// GetControlFrameworkSubsections returns all subsections of a control in a framework +func (gs *GitRegoStore) GetControlFrameworkSubsections(controlID string, frameworkName string) ([]string, error) { + gs.frameworksLock.RLock() + defer gs.frameworksLock.RUnlock() + gs.controlsLock.RLock() + defer gs.controlsLock.RUnlock() + + fw, err := gs.getOPAFrameworkByName(frameworkName) // doesn't lock framework + if err != nil { + return nil, err + } + + control, err := gs.getOPAControlByID(controlID) // doesn't lock control + if err != nil { + return nil, err + } + + fwSubsectionIDs := make([]string, 0) + subsections := fw.SubSections + + for i := range subsections { + fwSubsectionIDs = gs.getControlFrameworkSubSections(fwSubsectionIDs, control.ControlID, subsections[i]) + } + + return fwSubsectionIDs, nil +} + +func (gs *GitRegoStore) getControlFrameworkSubSections(fwSubsectionIDs []string, controlID string, section *reporthandling.FrameworkSubSection) []string { + // Return the current list if the section is nil + if section == nil { + return fwSubsectionIDs + } + + // Recursively gather IDs from subsections + if section.SubSections != nil { + for _, subSection := range section.SubSections { + // Update fwSubsectionIDs with the result of the recursive call + fwSubsectionIDs = gs.getControlFrameworkSubSections(fwSubsectionIDs, controlID, subSection) + } + } + + // Append the current section ID if it contains the controlID + if section.ControlIDs != nil && slices.Contains(section.ControlIDs, controlID) { + fwSubsectionIDs = append(fwSubsectionIDs, section.ID) + } + + return fwSubsectionIDs +} + // =============================================================== // =========================== Frameworks ======================== // =============================================================== diff --git a/gitregostore/gitstoremethods_test.go b/gitregostore/gitstoremethods_test.go index b25797925..f22dc774c 100644 --- 
a/gitregostore/gitstoremethods_test.go +++ b/gitregostore/gitstoremethods_test.go @@ -220,6 +220,28 @@ func gs_tests(t *testing.T, gs *GitRegoStore) { "wrong control for framework name 'NSA' and control name 'Allow privilege escalation' expected: 'C-0016', found %s", control.ControlID, ) }) + + t.Run("should retrieve list of fw subsections IDs", func(t *testing.T) { + t.Parallel() + + subsectionsIDs, err := gs.GetControlFrameworkSubsections("C-0067", "cis-eks-t1.2.0") + require.NoError(t, err) + require.NotEmptyf(t, subsectionsIDs, + "failed to get subsections ids list for control 'C-0067' in framework name 'cis-eks-t1.2.0' %v", err, + ) + assert.ElementsMatch(t, []string{"2.1"}, subsectionsIDs) + + t.Run("should retrieve fw subsection by ID", func(t *testing.T) { + t.Parallel() + + subsectionsIDs, err := gs.GetControlFrameworkSubsections("C-0167", "cis-aks-t1.2.0") + assert.NoError(t, err) + require.NotEmptyf(t, subsectionsIDs, + "failed to get subsections ids list for control 'C-0167' in framework name 'cis-aks-t1.2.0' %v", err, + ) + assert.ElementsMatch(t, []string{"3.1"}, subsectionsIDs) + }) + }) } func TestGetPoliciesMethodsNewV2(t *testing.T) { From 75f16b20d73b6d1fdf6ece60ca6249f16a29352a Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Thu, 16 May 2024 10:08:29 +0300 Subject: [PATCH 169/195] match roleref kind with role kind Signed-off-by: YiscahLevySilas1 --- rules/workload-with-cluster-takeover-roles/raw.rego | 1 + 1 file changed, 1 insertion(+) diff --git a/rules/workload-with-cluster-takeover-roles/raw.rego b/rules/workload-with-cluster-takeover-roles/raw.rego index 8b1e1836b..c84463cb8 100644 --- a/rules/workload-with-cluster-takeover-roles/raw.rego +++ b/rules/workload-with-cluster-takeover-roles/raw.rego @@ -23,6 +23,7 @@ deny[msga] { rolebinding := input[_] rolebinding.kind in ["RoleBinding", "ClusterRoleBinding"] rolebinding.roleRef.name == role.metadata.name + rolebinding.roleRef.kind == role.kind rolebinding.subjects[j].kind == "ServiceAccount" rolebinding.subjects[j].name == sa.metadata.name rolebinding.subjects[j].namespace == sa.metadata.namespace From f1c7f066db786267f890e8ea1fc864a5774b9f40 Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Mon, 27 May 2024 09:52:24 +0300 Subject: [PATCH 170/195] remove api version list Signed-off-by: David Wertenteil --- rules/exposure-to-internet-via-gateway-api/rule.metadata.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/exposure-to-internet-via-gateway-api/rule.metadata.json b/rules/exposure-to-internet-via-gateway-api/rule.metadata.json index 51dda9e8a..e293701d0 100644 --- a/rules/exposure-to-internet-via-gateway-api/rule.metadata.json +++ b/rules/exposure-to-internet-via-gateway-api/rule.metadata.json @@ -47,7 +47,7 @@ "gateway.networking.k8s.io" ], "apiVersions": [ - "v1", "v1alpha2" + "*" ], "resources": [ "HTTPRoute", @@ -56,7 +56,7 @@ ] } ], - "description": "fails in case the running workload has binded Service and Gateway that are exposing it on Internet.", + "description": "fails if the running workload is bound to a Service that is exposed to the Internet through a Gateway.", "remediation": "", "ruleQuery": "armo_builtins" } From b20f3d6f5b70f467854c04c38b929c2c4c73816e Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Mon, 27 May 2024 18:35:17 +0300 Subject: [PATCH 171/195] Adding useFromKubescapeVersion Signed-off-by: David Wertenteil --- rules/exposure-to-internet-via-gateway-api/rule.metadata.json | 1 + 1 file changed, 1 insertion(+) diff --git 
a/rules/exposure-to-internet-via-gateway-api/rule.metadata.json b/rules/exposure-to-internet-via-gateway-api/rule.metadata.json index e293701d0..54b1aaad3 100644 --- a/rules/exposure-to-internet-via-gateway-api/rule.metadata.json +++ b/rules/exposure-to-internet-via-gateway-api/rule.metadata.json @@ -1,6 +1,7 @@ { "name": "exposure-to-internet-via-gateway-api", "attributes": { + "useFromKubescapeVersion": "v3.0.9" }, "ruleLanguage": "Rego", "match": [ From f562ac82ecd52e436c476cae710691d9f1d3e5ac Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Tue, 28 May 2024 09:29:19 +0300 Subject: [PATCH 172/195] exclude ks labels Signed-off-by: David Wertenteil --- exceptions/kubescape.json | 45 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/exceptions/kubescape.json b/exceptions/kubescape.json index 34b2187ed..7f9db06e9 100644 --- a/exceptions/kubescape.json +++ b/exceptions/kubescape.json @@ -1,4 +1,49 @@ [ + { + "name": "kubescape-ignore", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "true" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "yes" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "1" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "enable" + } + }, + { + "designatorType": "Attributes", + "attributes": { + "kubescape.io/ignore": "enabled" + } + } + ], + "posturePolicies": [ + {} + ] + }, { "name": "exclude-kubescape-deployment-security-context", "policyType": "postureExceptionPolicy", From 466d963583bfa3ea8b7f9559db80384fdcde9da2 Mon Sep 17 00:00:00 2001 From: David Wertenteil Date: Wed, 29 May 2024 19:02:38 +0300 Subject: [PATCH 173/195] Update kubescape.json Signed-off-by: David Wertenteil --- exceptions/kubescape.json | 45 --------------------------------------- 1 file changed, 45 deletions(-) diff --git a/exceptions/kubescape.json b/exceptions/kubescape.json index 7f9db06e9..34b2187ed 100644 --- a/exceptions/kubescape.json +++ b/exceptions/kubescape.json @@ -1,49 +1,4 @@ [ - { - "name": "kubescape-ignore", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "true" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "yes" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "1" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "enable" - } - }, - { - "designatorType": "Attributes", - "attributes": { - "kubescape.io/ignore": "enabled" - } - } - ], - "posturePolicies": [ - {} - ] - }, { "name": "exclude-kubescape-deployment-security-context", "policyType": "postureExceptionPolicy", From 31eee67066f44293b6e27e635d7a7eb1c0c29511 Mon Sep 17 00:00:00 2001 From: kooomix Date: Mon, 3 Jun 2024 16:33:56 +0300 Subject: [PATCH 174/195] feat: Update storage class path in pv-without-encryption rule Signed-off-by: kooomix --- rules/pv-without-encryption/raw.rego | 2 +- .../test/eks/expected.json | 27 ++++++++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/rules/pv-without-encryption/raw.rego b/rules/pv-without-encryption/raw.rego index 
96b62c375..cc9233d9f 100644 --- a/rules/pv-without-encryption/raw.rego +++ b/rules/pv-without-encryption/raw.rego @@ -18,7 +18,7 @@ deny[msga] { "packagename": "armo_builtins", "failedPaths": [], "fixPaths": [{ - "path": "pv.spec.storageClassName", + "path": "spec.storageClassName", "value": "" }], "alertScore": 7, diff --git a/rules/pv-without-encryption/test/eks/expected.json b/rules/pv-without-encryption/test/eks/expected.json index 2654377a9..cfd549621 100644 --- a/rules/pv-without-encryption/test/eks/expected.json +++ b/rules/pv-without-encryption/test/eks/expected.json @@ -1 +1,26 @@ -[{"alertMessage":"Volume 'pvc-0eeeeefe-5193-472c-a81e-104f3919130e' has is using a storage class that does not use encryption","failedPaths":[],"fixPaths":[{"path":"pv.spec.storageClassName","value":"\u003cyour encrypted storage class\u003e"}],"ruleStatus":"","packagename":"armo_builtins","alertScore":7,"alertObject":{"k8sApiObjects":[{"apiVersion":"v1","kind":"PersistentVolume","metadata":{"name":"pvc-0eeeeefe-5193-472c-a81e-104f3919130e"}}]}}] \ No newline at end of file +[ + { + "alertMessage": "Volume 'pvc-0eeeeefe-5193-472c-a81e-104f3919130e' has is using a storage class that does not use encryption", + "failedPaths": [], + "fixPaths": [ + { + "path": "spec.storageClassName", + "value": "\u003cyour encrypted storage class\u003e" + } + ], + "ruleStatus": "", + "packagename": "armo_builtins", + "alertScore": 7, + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "PersistentVolume", + "metadata": { + "name": "pvc-0eeeeefe-5193-472c-a81e-104f3919130e" + } + } + ] + } + } +] \ No newline at end of file From d01a7f7437f5f5c93bf7f6d5f03b3f105dc13d3e Mon Sep 17 00:00:00 2001 From: YiscahLevySilas1 Date: Tue, 4 Jun 2024 12:31:15 +0300 Subject: [PATCH 175/195] add builtin attribute to fw Signed-off-by: YiscahLevySilas1 --- frameworks/__YAMLscan.json | 4 +++- frameworks/allcontrols.json | 4 +++- frameworks/armobest.json | 4 +++- frameworks/cis-aks-t1.2.0.json | 3 ++- frameworks/cis-eks-t1.2.0.json | 3 ++- frameworks/cis-v1.23-t1.0.1.json | 3 ++- frameworks/clusterscan.json | 4 +++- frameworks/devopsbest.json | 4 +++- frameworks/mitre.json | 4 +++- frameworks/nsaframework.json | 4 +++- frameworks/security.json | 4 +++- frameworks/soc2.json | 4 +++- frameworks/workloadscan.json | 4 +++- 13 files changed, 36 insertions(+), 13 deletions(-) diff --git a/frameworks/__YAMLscan.json b/frameworks/__YAMLscan.json index 31896edd4..353b3ae30 100644 --- a/frameworks/__YAMLscan.json +++ b/frameworks/__YAMLscan.json @@ -1,7 +1,9 @@ { "name": "YAML-scanning", "description": "Controls relevant to yamls", - "attributes": {}, + "attributes": { + "builtin": true + }, "scanningScope": { "matches": [ "file" diff --git a/frameworks/allcontrols.json b/frameworks/allcontrols.json index 453f81327..d0a25500d 100644 --- a/frameworks/allcontrols.json +++ b/frameworks/allcontrols.json @@ -1,7 +1,9 @@ { "name": "AllControls", "description": "Contains all the controls from all the frameworks", - "attributes": {}, + "attributes": { + "builtin": true + }, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/armobest.json b/frameworks/armobest.json index 83e0d58be..abda35185 100644 --- a/frameworks/armobest.json +++ b/frameworks/armobest.json @@ -1,7 +1,9 @@ { "name": "ArmoBest", "description": "", - "attributes": {}, + "attributes": { + "builtin": true + }, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/cis-aks-t1.2.0.json b/frameworks/cis-aks-t1.2.0.json index f0a07ad97..2bf60aa45 
100644 --- a/frameworks/cis-aks-t1.2.0.json +++ b/frameworks/cis-aks-t1.2.0.json @@ -2,7 +2,8 @@ "name": "cis-aks-t1.2.0", "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", "attributes": { - "version": "v1.2.0" + "version": "v1.2.0", + "builtin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/cis-eks-t1.2.0.json b/frameworks/cis-eks-t1.2.0.json index a1c49b3f2..eb9ddbf8b 100644 --- a/frameworks/cis-eks-t1.2.0.json +++ b/frameworks/cis-eks-t1.2.0.json @@ -2,7 +2,8 @@ "name": "cis-eks-t1.2.0", "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", "attributes": { - "version": "v1.2.0" + "version": "v1.2.0", + "builtin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/cis-v1.23-t1.0.1.json b/frameworks/cis-v1.23-t1.0.1.json index 0c6f21fe9..d9c28f10d 100644 --- a/frameworks/cis-v1.23-t1.0.1.json +++ b/frameworks/cis-v1.23-t1.0.1.json @@ -2,7 +2,8 @@ "name": "cis-v1.23-t1.0.1", "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", "attributes": { - "version": "v1.0.1" + "version": "v1.0.1", + "builtin": true }, "scanningScope": { "matches": [ diff --git a/frameworks/clusterscan.json b/frameworks/clusterscan.json index 26ae0f0b8..9dd5f73ef 100644 --- a/frameworks/clusterscan.json +++ b/frameworks/clusterscan.json @@ -1,7 +1,9 @@ { "name": "ClusterScan", "description": "Framework for scanning a cluster", - "attributes": {}, + "attributes": { + "builtin": true + }, "typeTags": [ "security" ], diff --git a/frameworks/devopsbest.json b/frameworks/devopsbest.json index 299fbafe0..cb1ba3f0f 100644 --- a/frameworks/devopsbest.json +++ b/frameworks/devopsbest.json @@ -1,7 +1,9 @@ { "name": "DevOpsBest", "description": "", - "attributes": {}, + "attributes": { + "builtin": true + }, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/mitre.json b/frameworks/mitre.json index 8a5102ee1..c8e5e2194 100644 --- a/frameworks/mitre.json +++ b/frameworks/mitre.json @@ -1,7 +1,9 @@ { "name": "MITRE", "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", - "attributes": {}, + "attributes": { + "builtin": true + }, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/nsaframework.json b/frameworks/nsaframework.json index fd8ddf666..62f30f273 100644 --- a/frameworks/nsaframework.json +++ b/frameworks/nsaframework.json @@ -1,7 +1,9 @@ { "name": "NSA", "description": "Implement NSA security advices for K8s ", - "attributes": {}, + "attributes": { + "builtin": true + }, "scanningScope": { "matches": [ "cluster", diff --git a/frameworks/security.json b/frameworks/security.json index b82ac1d5c..0b1651f6f 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -1,7 +1,9 @@ { "name": "security", "description": "Controls that are used to assess security threats.", - "attributes": {}, + "attributes": { + "builtin": true + }, "typeTags": [ "security" ], diff --git a/frameworks/soc2.json b/frameworks/soc2.json index 822e31841..98d27e4db 100644 --- a/frameworks/soc2.json +++ b/frameworks/soc2.json @@ -1,7 +1,9 @@ { "name": "SOC2", "description": "SOC2 compliance related controls", - "attributes": {}, + "attributes": { + "builtin": true + }, "scanningScope": { "matches": [ "cluster", diff --git 
a/frameworks/workloadscan.json b/frameworks/workloadscan.json index 69c8015da..c3f1a0ec3 100644 --- a/frameworks/workloadscan.json +++ b/frameworks/workloadscan.json @@ -1,7 +1,9 @@ { "name": "WorkloadScan", "description": "Framework for scanning a workload", - "attributes": {}, + "attributes": { + "builtin": true + }, "typeTags": [ "security" ], From 8908a0e5a73a0a9925347daf064fa389b3a173aa Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Tue, 9 Jul 2024 11:11:57 +0300 Subject: [PATCH 176/195] Adding unauthenticated service control Signed-off-by: Amit Schendel --- controls/C-0274-unauthenticatedservice.json | 27 ++++++ frameworks/security.json | 6 ++ rules/unauthenticated-service/raw.rego | 50 +++++++++++ .../rule.metadata.json | 61 ++++++++++++++ .../test/service/expected.json | 83 +++++++++++++++++++ .../test/service/input/operator.yaml | 18 ++++ .../test/service/input/pod.yaml | 17 ++++ .../test/service/input/service.yaml | 12 +++ 8 files changed, 274 insertions(+) create mode 100644 controls/C-0274-unauthenticatedservice.json create mode 100644 rules/unauthenticated-service/raw.rego create mode 100644 rules/unauthenticated-service/rule.metadata.json create mode 100644 rules/unauthenticated-service/test/service/expected.json create mode 100644 rules/unauthenticated-service/test/service/input/operator.yaml create mode 100644 rules/unauthenticated-service/test/service/input/pod.yaml create mode 100644 rules/unauthenticated-service/test/service/input/service.yaml diff --git a/controls/C-0274-unauthenticatedservice.json b/controls/C-0274-unauthenticatedservice.json new file mode 100644 index 000000000..4fe73726d --- /dev/null +++ b/controls/C-0274-unauthenticatedservice.json @@ -0,0 +1,27 @@ +{ + "controlID": "C-0274", + "name": "Verify Authenticated Service", + "description": "Verifies if the service is authenticated", + "long_description": "Verifies that in order to access the service, the user must be authenticated.", + "remediation": "Configure the service to require authentication.", + "manual_test": "", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "rulesNames": [ + "unauthenticated-service" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "category": { + "name": "Network" + }, + "scanningScope": { + "matches": [ + "cluster" + ] + } +} \ No newline at end of file diff --git a/frameworks/security.json b/frameworks/security.json index 0b1651f6f..2bdf84494 100644 --- a/frameworks/security.json +++ b/frameworks/security.json @@ -223,6 +223,12 @@ "patch": { "name": "Exposure to internet via Gateway API" } + }, + { + "controlID": "C-0274", + "patch": { + "name": "Verify Authenticated Service" + } } ] } \ No newline at end of file diff --git a/rules/unauthenticated-service/raw.rego b/rules/unauthenticated-service/raw.rego new file mode 100644 index 000000000..9638b898d --- /dev/null +++ b/rules/unauthenticated-service/raw.rego @@ -0,0 +1,50 @@ +package armo_builtins + +import future.keywords.contains +import future.keywords.if + +deny contains msga if { + service := input[_] + service.kind == "Service" + + wl := input[_] + spec_template_spec_patterns := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Pod", "Job", "CronJob"} + spec_template_spec_patterns[wl.kind] + wl_connected_to_service(wl, service) + + service_scan_result := input[_] + service_scan_result.kind == "ServiceScanResult" + service_name := service.metadata.name + has_unauthenticated_service(service_name, service.metadata.namespace, service_scan_result) + + # Path to the 
workload spec. + path := "spec" + + msga := { + "alertMessage": sprintf("Unauthenticated service %v exposes %v", [service_name, wl.metadata.name]), + "alertScore": 7, + "fixPaths": [], + "reviewPaths": [path], + "failedPaths": [path], + "packagename": "armo_builtins", + "alertObject": {"k8sApiObjects": [wl]}, + "relatedObjects": [ + {"object": service}, + {"object": service_scan_result}, + ], + } +} + +has_unauthenticated_service(service_name, namespace, service_scan_result) if { + service_scan_result.metadata.name == service_name + service_scan_result.metadata.namespace == namespace + service_scan_result.spec.ports[_].authenticated == false +} + +wl_connected_to_service(wl, svc) if { + count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector) +} + +wl_connected_to_service(wl, svc) if { + wl.spec.selector.matchLabels == svc.spec.selector +} diff --git a/rules/unauthenticated-service/rule.metadata.json b/rules/unauthenticated-service/rule.metadata.json new file mode 100644 index 000000000..054326f7b --- /dev/null +++ b/rules/unauthenticated-service/rule.metadata.json @@ -0,0 +1,61 @@ +{ + "name": "unauthenticated-service", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "apiextensions.k8s.io/v1" + ], + "apiVersions": [ + "CustomResourceDefinition" + ], + "resources": [ + "ServiceScanResult" + ] + } + ], + "dynamicMatch": [ + ], + "ruleDependencies": [], + "description": "Verifies that the service is authenticated", + "remediation": "Add authentication to the service", + "ruleQuery": "armo_builtins" +} \ No newline at end of file diff --git a/rules/unauthenticated-service/test/service/expected.json b/rules/unauthenticated-service/test/service/expected.json new file mode 100644 index 000000000..2ae4b64ff --- /dev/null +++ b/rules/unauthenticated-service/test/service/expected.json @@ -0,0 +1,83 @@ +{ + "alertMessage": "Unauthenticated service operator exposes operator", + "alertObject": { + "k8sApiObjects": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "operator", + "labels": { + "app": "operator" + } + } + } + ] + }, + "alertScore": 7, + "deletePaths": null, + "failedPaths": ["spec"], + "fixPaths": [], + "packagename": "armo_builtins", + "relatedObjects": [ + { + "deletePaths": null, + "failedPaths": null, + "fixPaths": null, + "object": { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "name": "operator", + "namespace": "kubescape" + }, + "spec": { + "ports": [ + { + "port": 4002, + "protocol": "TCP", + "targetPort": 8080 + } + ], + "selector": { + "app": "operator" + } + } + }, + "reviewPaths": null + }, + { + "deletePaths": null, + "failedPaths": null, + "fixPaths": null, + "object": { + "apiVersion": "kubescape.io/v1", + "kind": "ServiceScanResult", + "metadata": { + "creationTimestamp": "2024-07-03T04:40:17Z", + "generation": 4, + "name": "operator", + "namespace": "kubescape", + "resourceVersion": "2772", + "uid": "24dc622d-ee78-40c2-8654-2a5604715f95" + }, + "spec": { + "clusterIP": "10.103.207.220", + "ports": [ + { + "applicationLayer": "", + "authenticated": false, + "port": 4002, + "presentationLayer": "http", + "protocol": "TCP", + 
"sessionLayer": "tcp" + } + ] + } + }, + "reviewPaths": null + } + ], + "reviewPaths": ["spec"], + "ruleStatus": "" +} \ No newline at end of file diff --git a/rules/unauthenticated-service/test/service/input/operator.yaml b/rules/unauthenticated-service/test/service/input/operator.yaml new file mode 100644 index 000000000..2905008e7 --- /dev/null +++ b/rules/unauthenticated-service/test/service/input/operator.yaml @@ -0,0 +1,18 @@ +apiVersion: kubescape.io/v1 +kind: ServiceScanResult +metadata: + creationTimestamp: "2024-07-03T04:40:17Z" + generation: 4 + name: operator + namespace: kubescape + resourceVersion: "2772" + uid: 24dc622d-ee78-40c2-8654-2a5604715f95 +spec: + clusterIP: 10.103.207.220 + ports: + - applicationLayer: "" + authenticated: false + port: 4002 + presentationLayer: http + protocol: TCP + sessionLayer: tcp diff --git a/rules/unauthenticated-service/test/service/input/pod.yaml b/rules/unauthenticated-service/test/service/input/pod.yaml new file mode 100644 index 000000000..f91f89733 --- /dev/null +++ b/rules/unauthenticated-service/test/service/input/pod.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: operator + namespace: kubescape + labels: + app: operator +spec: + containers: + - name: operator-container + image: your-operator-image:latest + ports: + - containerPort: 8080 + resources: + limits: + cpu: "1" + memory: "1Gi" \ No newline at end of file diff --git a/rules/unauthenticated-service/test/service/input/service.yaml b/rules/unauthenticated-service/test/service/input/service.yaml new file mode 100644 index 000000000..9d2ae271e --- /dev/null +++ b/rules/unauthenticated-service/test/service/input/service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: operator + namespace: kubescape +spec: + selector: + app: operator + ports: + - protocol: TCP + port: 4002 + targetPort: 8080 \ No newline at end of file From 96cc0fbe9594f2dce8fffd412727c050bf3a1a74 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Wed, 10 Jul 2024 10:28:06 +0300 Subject: [PATCH 177/195] Adding yamls that doesn't fail in the test Signed-off-by: Amit Schendel --- .../test/service/expected.json | 5 +++-- .../test/service/input/operator2.yaml | 18 ++++++++++++++++++ .../test/service/input/pod2.yaml | 17 +++++++++++++++++ .../test/service/input/service2.yaml | 12 ++++++++++++ 4 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 rules/unauthenticated-service/test/service/input/operator2.yaml create mode 100644 rules/unauthenticated-service/test/service/input/pod2.yaml create mode 100644 rules/unauthenticated-service/test/service/input/service2.yaml diff --git a/rules/unauthenticated-service/test/service/expected.json b/rules/unauthenticated-service/test/service/expected.json index 2ae4b64ff..033ae1ea6 100644 --- a/rules/unauthenticated-service/test/service/expected.json +++ b/rules/unauthenticated-service/test/service/expected.json @@ -1,4 +1,4 @@ -{ +[{ "alertMessage": "Unauthenticated service operator exposes operator", "alertObject": { "k8sApiObjects": [ @@ -80,4 +80,5 @@ ], "reviewPaths": ["spec"], "ruleStatus": "" -} \ No newline at end of file +} +] \ No newline at end of file diff --git a/rules/unauthenticated-service/test/service/input/operator2.yaml b/rules/unauthenticated-service/test/service/input/operator2.yaml new file mode 100644 index 000000000..96a495616 --- /dev/null +++ b/rules/unauthenticated-service/test/service/input/operator2.yaml @@ -0,0 +1,18 @@ +apiVersion: kubescape.io/v1 +kind: ServiceScanResult +metadata: + 
creationTimestamp: "2024-07-03T04:40:17Z" + generation: 4 + name: operator2 + namespace: kubescape + resourceVersion: "2772" + uid: 24dc622d-ee78-40c2-8654-2a5604715f95 +spec: + clusterIP: 10.103.207.220 + ports: + - applicationLayer: "" + authenticated: true + port: 4002 + presentationLayer: http + protocol: TCP + sessionLayer: tcp diff --git a/rules/unauthenticated-service/test/service/input/pod2.yaml b/rules/unauthenticated-service/test/service/input/pod2.yaml new file mode 100644 index 000000000..160f56455 --- /dev/null +++ b/rules/unauthenticated-service/test/service/input/pod2.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: operator2 + namespace: kubescape + labels: + app: operator2 +spec: + containers: + - name: operator-container + image: your-operator-image:latest + ports: + - containerPort: 8080 + resources: + limits: + cpu: "1" + memory: "1Gi" \ No newline at end of file diff --git a/rules/unauthenticated-service/test/service/input/service2.yaml b/rules/unauthenticated-service/test/service/input/service2.yaml new file mode 100644 index 000000000..bcec62922 --- /dev/null +++ b/rules/unauthenticated-service/test/service/input/service2.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: operator2 + namespace: kubescape +spec: + selector: + app: operator2 + ports: + - protocol: TCP + port: 4002 + targetPort: 8080 \ No newline at end of file From 81e9cffc028d3814fc8984e5d5c7d594af56118d Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Thu, 11 Jul 2024 11:38:23 +0300 Subject: [PATCH 178/195] Adding attack track Signed-off-by: Amit Schendel --- controls/C-0274-unauthenticatedservice.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/controls/C-0274-unauthenticatedservice.json b/controls/C-0274-unauthenticatedservice.json index 4fe73726d..37c3802d0 100644 --- a/controls/C-0274-unauthenticatedservice.json +++ b/controls/C-0274-unauthenticatedservice.json @@ -8,6 +8,14 @@ "attributes": { "controlTypeTags": [ "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } ] }, "rulesNames": [ From 759dc32c97bbf44bf98c46cbdbb02346ea9cfcc6 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Sun, 14 Jul 2024 11:41:58 +0300 Subject: [PATCH 179/195] Moving name to be in the top of the json Signed-off-by: Amit Schendel --- controls/C-0274-unauthenticatedservice.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controls/C-0274-unauthenticatedservice.json b/controls/C-0274-unauthenticatedservice.json index 4fe73726d..c249ef698 100644 --- a/controls/C-0274-unauthenticatedservice.json +++ b/controls/C-0274-unauthenticatedservice.json @@ -1,6 +1,6 @@ { - "controlID": "C-0274", "name": "Verify Authenticated Service", + "controlID": "C-0274", "description": "Verifies if the service is authenticated", "long_description": "Verifies that in order to access the service, the user must be authenticated.", "remediation": "Configure the service to require authentication.", From 1917ab8f6b53536df574adea3701a32b7814cf19 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Mon, 15 Jul 2024 14:34:10 +0300 Subject: [PATCH 180/195] Fixing the metadata Signed-off-by: Amit Schendel --- rules/unauthenticated-service/rule.metadata.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/unauthenticated-service/rule.metadata.json b/rules/unauthenticated-service/rule.metadata.json index 054326f7b..74686b71f 100644 --- 
a/rules/unauthenticated-service/rule.metadata.json +++ b/rules/unauthenticated-service/rule.metadata.json @@ -42,10 +42,10 @@ }, { "apiGroups": [ - "apiextensions.k8s.io/v1" + "kubescape.io" ], "apiVersions": [ - "CustomResourceDefinition" + "v1" ], "resources": [ "ServiceScanResult" From 4f49212272d3ce72fc8828b146c76b79fdca3bc8 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Tue, 16 Jul 2024 15:59:02 +0300 Subject: [PATCH 181/195] Adding fixed rego rule Signed-off-by: Amit Schendel --- controls/C-0274-unauthenticatedservice.json | 2 +- .../rule.metadata.json | 2 +- .../{service => fail_service}/expected.json | 0 .../input/operator.yaml | 0 .../input/operator2.yaml | 0 .../{service => fail_service}/input/pod.yaml | 0 .../{service => fail_service}/input/pod2.yaml | 0 .../input/service.yaml | 0 .../input/service2.yaml | 0 .../test/pass/expected.json | 1 + .../test/pass/input/deploy.yaml | 78 +++++++++++++++++++ .../test/pass/input/service.yaml | 31 ++++++++ .../test/pass/input/service_result.yaml | 21 +++++ 13 files changed, 133 insertions(+), 2 deletions(-) rename rules/unauthenticated-service/test/{service => fail_service}/expected.json (100%) rename rules/unauthenticated-service/test/{service => fail_service}/input/operator.yaml (100%) rename rules/unauthenticated-service/test/{service => fail_service}/input/operator2.yaml (100%) rename rules/unauthenticated-service/test/{service => fail_service}/input/pod.yaml (100%) rename rules/unauthenticated-service/test/{service => fail_service}/input/pod2.yaml (100%) rename rules/unauthenticated-service/test/{service => fail_service}/input/service.yaml (100%) rename rules/unauthenticated-service/test/{service => fail_service}/input/service2.yaml (100%) create mode 100644 rules/unauthenticated-service/test/pass/expected.json create mode 100644 rules/unauthenticated-service/test/pass/input/deploy.yaml create mode 100644 rules/unauthenticated-service/test/pass/input/service.yaml create mode 100644 rules/unauthenticated-service/test/pass/input/service_result.yaml diff --git a/controls/C-0274-unauthenticatedservice.json b/controls/C-0274-unauthenticatedservice.json index 75e1ba438..17d4e11bc 100644 --- a/controls/C-0274-unauthenticatedservice.json +++ b/controls/C-0274-unauthenticatedservice.json @@ -29,7 +29,7 @@ }, "scanningScope": { "matches": [ - "cluster" + "cluster" ] } } \ No newline at end of file diff --git a/rules/unauthenticated-service/rule.metadata.json b/rules/unauthenticated-service/rule.metadata.json index 74686b71f..3ce6e83d8 100644 --- a/rules/unauthenticated-service/rule.metadata.json +++ b/rules/unauthenticated-service/rule.metadata.json @@ -48,7 +48,7 @@ "v1" ], "resources": [ - "ServiceScanResult" + "servicesscanresults" ] } ], diff --git a/rules/unauthenticated-service/test/service/expected.json b/rules/unauthenticated-service/test/fail_service/expected.json similarity index 100% rename from rules/unauthenticated-service/test/service/expected.json rename to rules/unauthenticated-service/test/fail_service/expected.json diff --git a/rules/unauthenticated-service/test/service/input/operator.yaml b/rules/unauthenticated-service/test/fail_service/input/operator.yaml similarity index 100% rename from rules/unauthenticated-service/test/service/input/operator.yaml rename to rules/unauthenticated-service/test/fail_service/input/operator.yaml diff --git a/rules/unauthenticated-service/test/service/input/operator2.yaml b/rules/unauthenticated-service/test/fail_service/input/operator2.yaml similarity index 100% rename from 
rules/unauthenticated-service/test/service/input/operator2.yaml rename to rules/unauthenticated-service/test/fail_service/input/operator2.yaml diff --git a/rules/unauthenticated-service/test/service/input/pod.yaml b/rules/unauthenticated-service/test/fail_service/input/pod.yaml similarity index 100% rename from rules/unauthenticated-service/test/service/input/pod.yaml rename to rules/unauthenticated-service/test/fail_service/input/pod.yaml diff --git a/rules/unauthenticated-service/test/service/input/pod2.yaml b/rules/unauthenticated-service/test/fail_service/input/pod2.yaml similarity index 100% rename from rules/unauthenticated-service/test/service/input/pod2.yaml rename to rules/unauthenticated-service/test/fail_service/input/pod2.yaml diff --git a/rules/unauthenticated-service/test/service/input/service.yaml b/rules/unauthenticated-service/test/fail_service/input/service.yaml similarity index 100% rename from rules/unauthenticated-service/test/service/input/service.yaml rename to rules/unauthenticated-service/test/fail_service/input/service.yaml diff --git a/rules/unauthenticated-service/test/service/input/service2.yaml b/rules/unauthenticated-service/test/fail_service/input/service2.yaml similarity index 100% rename from rules/unauthenticated-service/test/service/input/service2.yaml rename to rules/unauthenticated-service/test/fail_service/input/service2.yaml diff --git a/rules/unauthenticated-service/test/pass/expected.json b/rules/unauthenticated-service/test/pass/expected.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/rules/unauthenticated-service/test/pass/expected.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/rules/unauthenticated-service/test/pass/input/deploy.yaml b/rules/unauthenticated-service/test/pass/input/deploy.yaml new file mode 100644 index 000000000..608beba22 --- /dev/null +++ b/rules/unauthenticated-service/test/pass/input/deploy.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"name":"operator-deployment","namespace":"new-namespace"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"operator"}},"template":{"metadata":{"labels":{"app":"operator"}},"spec":{"containers":[{"args":["-c","nc -lnvp 8080"[],"command":["/bin/sh"],"image":"alpine:3.18.2","name":"operator-container","ports":[{"containerPort":8080}],"volumeMounts":[{"mountPath":"/etc/config","name":"config-volume"}]}],"volumes":[{"configMap":{"name":"operator-configmap"},"name":"config-volume"}]}}}} + creationTimestamp: "2024-07-15T11:38:56Z" + generation: 1 + name: operator-deployment + namespace: new-namespace + resourceVersion: "1118651" + uid: d613b9a8-7ed8-4e0c-b80d-b14023b8d346 +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: operator + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: operator + spec: + containers: + - args: + - -c + - nc -lnvp 8080 + command: + - /bin/sh + image: alpine:3.18.2 + imagePullPolicy: IfNotPresent + name: operator-container + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: config-volume + dnsPolicy: ClusterFirst + 
restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + defaultMode: 420 + name: operator-configmap + name: config-volume +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2024-07-15T11:39:01Z" + lastUpdateTime: "2024-07-15T11:39:01Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2024-07-15T11:38:56Z" + lastUpdateTime: "2024-07-15T11:39:01Z" + message: ReplicaSet "operator-deployment-748b6d7d54" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 \ No newline at end of file diff --git a/rules/unauthenticated-service/test/pass/input/service.yaml b/rules/unauthenticated-service/test/pass/input/service.yaml new file mode 100644 index 000000000..de9e1b93f --- /dev/null +++ b/rules/unauthenticated-service/test/pass/input/service.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"operator","namespace":"new-namespace"},"spec":{"ports":[{"port":4002,"protocol":"TCP","targetPort":8080}],"selector":{"app":"operator"},"type":"NodePort"}} + creationTimestamp: "2024-07-15T11:38:56Z" + name: operator + namespace: new-namespace + resourceVersion: "1118630" + uid: 9cb0d9b9-c4d7-4b48-b456-71229bdc7216 +spec: + clusterIP: 10.105.77.60 + clusterIPs: + - 10.105.77.60 + externalTrafficPolicy: Cluster + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - nodePort: 31624 + port: 4002 + protocol: TCP + targetPort: 8080 + selector: + app: operator + sessionAffinity: None + type: NodePort +status: + loadBalancer: {} \ No newline at end of file diff --git a/rules/unauthenticated-service/test/pass/input/service_result.yaml b/rules/unauthenticated-service/test/pass/input/service_result.yaml new file mode 100644 index 000000000..b46f41790 --- /dev/null +++ b/rules/unauthenticated-service/test/pass/input/service_result.yaml @@ -0,0 +1,21 @@ +apiVersion: kubescape.io/v1 +kind: ServiceScanResult +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"kubescape.io/v1","kind":"ServiceScanResult","metadata":{"annotations":{},"name":"operator","namespace":"new-namespace"},"spec":{"clusterIP":"10.103.207.220","ports":[{"applicationLayer":"","authenticated":false,"port":4002,"presentationLayer":"http","protocol":"TCP","sessionLayer":"tcp"}]}} + creationTimestamp: "2024-07-15T11:39:46Z" + generation: 1 + name: operator + namespace: new-namespace + resourceVersion: "1118691" + uid: cd049412-c329-48ce-82b8-dfa56d6e85fd +spec: + clusterIP: 10.103.207.220 + ports: + - applicationLayer: "" + authenticated: true + port: 4002 + presentationLayer: http + protocol: TCP + sessionLayer: tcp \ No newline at end of file From 3f59097a5e2027f064408f4fbf1ba96b78e2afa1 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Tue, 16 Jul 2024 17:50:47 +0300 Subject: [PATCH 182/195] Fixing reviews and tests Signed-off-by: Amit Schendel --- rules/unauthenticated-service/raw.rego | 7 +-- .../test/fail_service/expected.json | 52 +++++++++++++++---- 2 files changed, 47 insertions(+), 12 deletions(-) diff --git a/rules/unauthenticated-service/raw.rego b/rules/unauthenticated-service/raw.rego 
index 9638b898d..950b484e8 100644 --- a/rules/unauthenticated-service/raw.rego +++ b/rules/unauthenticated-service/raw.rego @@ -17,7 +17,7 @@ deny contains msga if { service_name := service.metadata.name has_unauthenticated_service(service_name, service.metadata.namespace, service_scan_result) - # Path to the workload spec. + # Path to the service object path := "spec" msga := { @@ -25,10 +25,11 @@ deny contains msga if { "alertScore": 7, "fixPaths": [], "reviewPaths": [path], - "failedPaths": [path], + "failedPaths": [], "packagename": "armo_builtins", - "alertObject": {"k8sApiObjects": [wl]}, + "alertObject": {"k8sApiObjects": [service]}, "relatedObjects": [ + {"object": wl}, {"object": service}, {"object": service_scan_result}, ], diff --git a/rules/unauthenticated-service/test/fail_service/expected.json b/rules/unauthenticated-service/test/fail_service/expected.json index 033ae1ea6..bd3170cf9 100644 --- a/rules/unauthenticated-service/test/fail_service/expected.json +++ b/rules/unauthenticated-service/test/fail_service/expected.json @@ -1,25 +1,59 @@ -[{ +[ + { "alertMessage": "Unauthenticated service operator exposes operator", "alertObject": { "k8sApiObjects": [ { "apiVersion": "v1", - "kind": "Pod", + "kind": "Service", "metadata": { - "name": "operator", - "labels": { - "app": "operator" - } + "name": "operator" } } ] }, "alertScore": 7, "deletePaths": null, - "failedPaths": ["spec"], + "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", "relatedObjects": [ + { + "deletePaths": null, + "failedPaths": null, + "fixPaths": null, + "object": { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "labels": { + "app": "operator" + }, + "name": "operator", + "namespace": "kubescape" + }, + "spec": { + "containers": [ + { + "image": "your-operator-image:latest", + "name": "operator-container", + "ports": [ + { + "containerPort": 8080 + } + ], + "resources": { + "limits": { + "cpu": "1", + "memory": "1Gi" + } + } + } + ] + } + }, + "reviewPaths": null + }, { "deletePaths": null, "failedPaths": null, @@ -80,5 +114,5 @@ ], "reviewPaths": ["spec"], "ruleStatus": "" -} -] \ No newline at end of file + } +] From f45ab4018031d4c7df0544642d9a471aac464860 Mon Sep 17 00:00:00 2001 From: kooomix Date: Sun, 21 Jul 2024 15:25:39 +0300 Subject: [PATCH 183/195] feat: Order Kubernetes releases by publication date This commit modifies the `get_kubernetes_supported_versions` function in `validations.py` to order the releases by their publication date. This ensures that the supported versions are listed in the correct chronological order. Additionally, the commit limits the number of supported versions to 5 instead of 3, as smaller versions might have updates after the latest major.minor version. The function then returns the top 3 versions in descending order. Note: This commit message follows the established convention of starting with a type prefix (feat for feature) and providing a concise and descriptive summary of the changes made. 
Signed-off-by: kooomix --- scripts/validations.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/scripts/validations.py b/scripts/validations.py index abfe74564..7cdb59721 100644 --- a/scripts/validations.py +++ b/scripts/validations.py @@ -1,4 +1,5 @@ import json +from operator import itemgetter import os import re import requests @@ -162,20 +163,33 @@ def get_kubernetes_supported_versions(): raise Exception("Failed to fetch Kubernetes releases") from e releases = response.json() + + # Order the releases by publication date + ordered_releases = sorted(releases, key=itemgetter('created_at'), reverse=True) + supported_versions = [] - for release in releases: + for release in ordered_releases: if not release['draft'] and not release['prerelease']: tag_name = release['tag_name'] if all(x not in tag_name for x in ['alpha', 'beta', 'rc']): major_minor_version = '.'.join(tag_name.lstrip('v').split('.')[:2]) if major_minor_version not in supported_versions: supported_versions.append(major_minor_version) - if len(supported_versions) == 3: + + # we are taking 5 since smaller versions might have updates after the latest major.minor version + if len(supported_versions) == 5: break if not supported_versions: raise Exception("No supported Kubernetes versions found.") - return supported_versions + + # Sort the versions in descending order as strings + sorted_versions = sorted(supported_versions, reverse=True) + + # Get the top 3 versions + top_3_versions = sorted_versions[:3] + + return top_3_versions def validate_k8s_supported_versions_in_rego(): # Step 1: Get the latest supported Kubernetes versions From a7da61b080ac69fe59d3bdb3a383b4ea07dd95a7 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Sun, 21 Jul 2024 16:50:08 +0300 Subject: [PATCH 184/195] Changing the alert object Signed-off-by: Amit Schendel --- rules/unauthenticated-service/raw.rego | 3 +- .../test/fail_service/expected.json | 41 ++----------------- 2 files changed, 5 insertions(+), 39 deletions(-) diff --git a/rules/unauthenticated-service/raw.rego b/rules/unauthenticated-service/raw.rego index 950b484e8..be289fde6 100644 --- a/rules/unauthenticated-service/raw.rego +++ b/rules/unauthenticated-service/raw.rego @@ -27,9 +27,8 @@ deny contains msga if { "reviewPaths": [path], "failedPaths": [], "packagename": "armo_builtins", - "alertObject": {"k8sApiObjects": [service]}, + "alertObject": {"k8sApiObjects": [wl]}, "relatedObjects": [ - {"object": wl}, {"object": service}, {"object": service_scan_result}, ], diff --git a/rules/unauthenticated-service/test/fail_service/expected.json b/rules/unauthenticated-service/test/fail_service/expected.json index bd3170cf9..e15ff1092 100644 --- a/rules/unauthenticated-service/test/fail_service/expected.json +++ b/rules/unauthenticated-service/test/fail_service/expected.json @@ -5,8 +5,11 @@ "k8sApiObjects": [ { "apiVersion": "v1", - "kind": "Service", + "kind": "Pod", "metadata": { + "labels": { + "app": "operator" + }, "name": "operator" } } @@ -18,42 +21,6 @@ "fixPaths": [], "packagename": "armo_builtins", "relatedObjects": [ - { - "deletePaths": null, - "failedPaths": null, - "fixPaths": null, - "object": { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "labels": { - "app": "operator" - }, - "name": "operator", - "namespace": "kubescape" - }, - "spec": { - "containers": [ - { - "image": "your-operator-image:latest", - "name": "operator-container", - "ports": [ - { - "containerPort": 8080 - } - ], - "resources": { - "limits": { - "cpu": "1", - 
"memory": "1Gi" - } - } - } - ] - } - }, - "reviewPaths": null - }, { "deletePaths": null, "failedPaths": null, From 432ea942a8acf24762eff0d58cc22771e0d83439 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Sun, 21 Jul 2024 17:05:31 +0300 Subject: [PATCH 185/195] Removing crd from related objects Signed-off-by: Amit Schendel --- rules/unauthenticated-service/raw.rego | 1 - .../test/fail_service/expected.json | 31 ------------------- 2 files changed, 32 deletions(-) diff --git a/rules/unauthenticated-service/raw.rego b/rules/unauthenticated-service/raw.rego index be289fde6..cdc4c9cc7 100644 --- a/rules/unauthenticated-service/raw.rego +++ b/rules/unauthenticated-service/raw.rego @@ -30,7 +30,6 @@ deny contains msga if { "alertObject": {"k8sApiObjects": [wl]}, "relatedObjects": [ {"object": service}, - {"object": service_scan_result}, ], } } diff --git a/rules/unauthenticated-service/test/fail_service/expected.json b/rules/unauthenticated-service/test/fail_service/expected.json index e15ff1092..6a14dcbd9 100644 --- a/rules/unauthenticated-service/test/fail_service/expected.json +++ b/rules/unauthenticated-service/test/fail_service/expected.json @@ -46,37 +46,6 @@ } }, "reviewPaths": null - }, - { - "deletePaths": null, - "failedPaths": null, - "fixPaths": null, - "object": { - "apiVersion": "kubescape.io/v1", - "kind": "ServiceScanResult", - "metadata": { - "creationTimestamp": "2024-07-03T04:40:17Z", - "generation": 4, - "name": "operator", - "namespace": "kubescape", - "resourceVersion": "2772", - "uid": "24dc622d-ee78-40c2-8654-2a5604715f95" - }, - "spec": { - "clusterIP": "10.103.207.220", - "ports": [ - { - "applicationLayer": "", - "authenticated": false, - "port": 4002, - "presentationLayer": "http", - "protocol": "TCP", - "sessionLayer": "tcp" - } - ] - } - }, - "reviewPaths": null } ], "reviewPaths": ["spec"], From b65e59d6f1eb692972e770b39fb8952e6c8f31dc Mon Sep 17 00:00:00 2001 From: rinao12 <121787861+rinao12@users.noreply.github.com> Date: Mon, 22 Jul 2024 15:25:12 +0300 Subject: [PATCH 186/195] SUB-3901 - New Attack Path -2: External facing database without authentication Signed-off-by: rinao12 <121787861+rinao12@users.noreply.github.com> --- .../workload-unauthenticated-service.json | 27 +++++++++++++++++++ controls/C-0274-unauthenticatedservice.json | 2 +- 2 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 attack-tracks/workload-unauthenticated-service.json diff --git a/attack-tracks/workload-unauthenticated-service.json b/attack-tracks/workload-unauthenticated-service.json new file mode 100644 index 000000000..42fddd225 --- /dev/null +++ b/attack-tracks/workload-unauthenticated-service.json @@ -0,0 +1,27 @@ +{ + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "workload-unauthenticated-service" + }, + "spec": { + "version": "1.0", + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Execution (Vulnerable Image)", + "description": "An attacker can execute malicious code by exploiting vulnerable images.", + "checksVulnerabilities": true, + "subSteps": [ + { + "name": "Data Collection", + "description": "An attacker can gather data." 
+ } + ] + } + ] + } + } +} \ No newline at end of file diff --git a/controls/C-0274-unauthenticatedservice.json b/controls/C-0274-unauthenticatedservice.json index 17d4e11bc..d199fb929 100644 --- a/controls/C-0274-unauthenticatedservice.json +++ b/controls/C-0274-unauthenticatedservice.json @@ -11,7 +11,7 @@ ], "attackTracks": [ { - "attackTrack": "workload-external-track", + "attackTrack": "workload-unauthenticated-service", "categories": [ "Data Collection" ] From bd92e11101832c723c641a815f0d827ab4d83d91 Mon Sep 17 00:00:00 2001 From: kooomix Date: Tue, 23 Jul 2024 06:18:58 +0300 Subject: [PATCH 187/195] feat: Add unauthenticated service control This commit adds the unauthenticated service control to the `C-0256-exposuretointernet.json` and `C-0266-exposuretointernet-gateway.json` files. The control is categorized under "Initial Access" and is part of the "workload-unauthenticated-service" attack track. Note: This commit message follows the established convention of starting with a type prefix (feat for feature) and providing a concise and descriptive summary of the changes made. Signed-off-by: kooomix --- controls/C-0256-exposuretointernet.json | 6 ++++++ controls/C-0266-exposuretointernet-gateway.json | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/controls/C-0256-exposuretointernet.json b/controls/C-0256-exposuretointernet.json index 0abb72f5b..4e6247e51 100644 --- a/controls/C-0256-exposuretointernet.json +++ b/controls/C-0256-exposuretointernet.json @@ -28,6 +28,12 @@ "categories": [ "Initial Access" ] + }, + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Initial Access" + ] } ] }, diff --git a/controls/C-0266-exposuretointernet-gateway.json b/controls/C-0266-exposuretointernet-gateway.json index 776d1cef7..c349b0314 100644 --- a/controls/C-0266-exposuretointernet-gateway.json +++ b/controls/C-0266-exposuretointernet-gateway.json @@ -22,6 +22,12 @@ "categories": [ "Initial Access" ] + }, + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Initial Access" + ] } ] }, From 400c3b8c531680f0da72c11c2759ddedee724c05 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Wed, 24 Jul 2024 17:22:48 +0300 Subject: [PATCH 188/195] Adding review path for the service Signed-off-by: Amit Schendel --- rules/unauthenticated-service/raw.rego | 4 +++- rules/unauthenticated-service/test/fail_service/expected.json | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/rules/unauthenticated-service/raw.rego b/rules/unauthenticated-service/raw.rego index cdc4c9cc7..1b2d7c273 100644 --- a/rules/unauthenticated-service/raw.rego +++ b/rules/unauthenticated-service/raw.rego @@ -29,7 +29,9 @@ deny contains msga if { "packagename": "armo_builtins", "alertObject": {"k8sApiObjects": [wl]}, "relatedObjects": [ - {"object": service}, + {"object": service, + "reviewPaths": ["spec"], + }, ], } } diff --git a/rules/unauthenticated-service/test/fail_service/expected.json b/rules/unauthenticated-service/test/fail_service/expected.json index 6a14dcbd9..8e3abc42f 100644 --- a/rules/unauthenticated-service/test/fail_service/expected.json +++ b/rules/unauthenticated-service/test/fail_service/expected.json @@ -45,7 +45,7 @@ } } }, - "reviewPaths": null + "reviewPaths": ["spec"] } ], "reviewPaths": ["spec"], From 86e5ebb70bf4a3f816fac1bb184c1666dadd35ab Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Thu, 25 Jul 2024 13:22:26 +0300 Subject: [PATCH 189/195] PR fixes Signed-off-by: Amit Schendel --- rules/unauthenticated-service/raw.rego | 5 +---- 1 
file changed, 1 insertion(+), 4 deletions(-) diff --git a/rules/unauthenticated-service/raw.rego b/rules/unauthenticated-service/raw.rego index 1b2d7c273..a0a3a95a0 100644 --- a/rules/unauthenticated-service/raw.rego +++ b/rules/unauthenticated-service/raw.rego @@ -17,14 +17,11 @@ deny contains msga if { service_name := service.metadata.name has_unauthenticated_service(service_name, service.metadata.namespace, service_scan_result) - # Path to the service object - path := "spec" - msga := { "alertMessage": sprintf("Unauthenticated service %v exposes %v", [service_name, wl.metadata.name]), "alertScore": 7, "fixPaths": [], - "reviewPaths": [path], + "reviewPaths": ["spec"], "failedPaths": [], "packagename": "armo_builtins", "alertObject": {"k8sApiObjects": [wl]}, From ef079e794fe6f11afb3b92c91505e09a1804d3c2 Mon Sep 17 00:00:00 2001 From: Amit Schendel Date: Thu, 25 Jul 2024 13:41:08 +0300 Subject: [PATCH 190/195] Adding CR fixes Signed-off-by: Amit Schendel --- rules/unauthenticated-service/raw.rego | 2 +- rules/unauthenticated-service/test/fail_service/expected.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/unauthenticated-service/raw.rego b/rules/unauthenticated-service/raw.rego index a0a3a95a0..6e311218c 100644 --- a/rules/unauthenticated-service/raw.rego +++ b/rules/unauthenticated-service/raw.rego @@ -21,7 +21,7 @@ deny contains msga if { "alertMessage": sprintf("Unauthenticated service %v exposes %v", [service_name, wl.metadata.name]), "alertScore": 7, "fixPaths": [], - "reviewPaths": ["spec"], + "reviewPaths": [], "failedPaths": [], "packagename": "armo_builtins", "alertObject": {"k8sApiObjects": [wl]}, diff --git a/rules/unauthenticated-service/test/fail_service/expected.json b/rules/unauthenticated-service/test/fail_service/expected.json index 8e3abc42f..3d733686b 100644 --- a/rules/unauthenticated-service/test/fail_service/expected.json +++ b/rules/unauthenticated-service/test/fail_service/expected.json @@ -48,7 +48,7 @@ "reviewPaths": ["spec"] } ], - "reviewPaths": ["spec"], + "reviewPaths": [], "ruleStatus": "" } ] From 2c91979dd477b73b75f2f772ae2d3fb5d960eb92 Mon Sep 17 00:00:00 2001 From: kooomix Date: Thu, 15 Aug 2024 14:20:19 +0300 Subject: [PATCH 191/195] feat: Update description of "Initial Access" in workload-unauthenticated-service.json Signed-off-by: kooomix --- attack-tracks/workload-unauthenticated-service.json | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/attack-tracks/workload-unauthenticated-service.json b/attack-tracks/workload-unauthenticated-service.json index 42fddd225..62ec29d4f 100644 --- a/attack-tracks/workload-unauthenticated-service.json +++ b/attack-tracks/workload-unauthenticated-service.json @@ -8,18 +8,11 @@ "version": "1.0", "data": { "name": "Initial Access", - "description": "An attacker can access the Kubernetes environment.", + "description": "The service is exposed outside the Kubernetes network.", "subSteps": [ { - "name": "Execution (Vulnerable Image)", - "description": "An attacker can execute malicious code by exploiting vulnerable images.", - "checksVulnerabilities": true, - "subSteps": [ - { - "name": "Data Collection", - "description": "An attacker can gather data." 
- } - ] + "name": "Data Collection", + "description": "Database access is missing authentication and it can be accessed by anyone" } ] } From 24102452a7fa4cda1eb00d5f6b5087d1b8575328 Mon Sep 17 00:00:00 2001 From: kooomix Date: Thu, 15 Aug 2024 15:15:46 +0300 Subject: [PATCH 192/195] feat: Update supported Kubernetes versions in outdated-k8s-version rule This commit updates the supported Kubernetes versions in the `outdated-k8s-version` rule. The versions `v1.31`, `v1.30`, and `v1.29` are now considered supported. Signed-off-by: kooomix --- rules/outdated-k8s-version/raw.rego | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/outdated-k8s-version/raw.rego b/rules/outdated-k8s-version/raw.rego index f592d87ce..810931a3b 100644 --- a/rules/outdated-k8s-version/raw.rego +++ b/rules/outdated-k8s-version/raw.rego @@ -18,7 +18,7 @@ deny[msga] { has_outdated_version(version) { # the `supported_k8s_versions` is validated in the validations script against "https://api.github.com/repos/kubernetes/kubernetes/releases" - supported_k8s_versions := ["v1.30", "v1.29", "v1.28"] + supported_k8s_versions := ["v1.31", "v1.30", "v1.29"] every v in supported_k8s_versions{ not startswith(version, v) } From f1a7df78b6a6698aa5b620928a7efe47bef9098a Mon Sep 17 00:00:00 2001 From: kooomix Date: Thu, 15 Aug 2024 15:48:51 +0300 Subject: [PATCH 193/195] Update kubelet and kube-proxy versions to v1.31.6 in node.json Signed-off-by: kooomix --- rules/outdated-k8s-version/test/pass/input/node.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/outdated-k8s-version/test/pass/input/node.json b/rules/outdated-k8s-version/test/pass/input/node.json index ee12e6144..30e409c70 100644 --- a/rules/outdated-k8s-version/test/pass/input/node.json +++ b/rules/outdated-k8s-version/test/pass/input/node.json @@ -200,8 +200,8 @@ "bootID": "85cb3c8a-7d8e-4885-9a9c-e8a340332f21", "containerRuntimeVersion": "docker://20.10.7", "kernelVersion": "5.11.0-43-generic", - "kubeProxyVersion": "v1.28.6", - "kubeletVersion": "v1.28.6", + "kubeProxyVersion": "v1.31.6", + "kubeletVersion": "v1.31.6", "machineID": "b77ec962e3734760b1e756ffc5e83152", "operatingSystem": "linux", "osImage": "Ubuntu 20.04.2 LTS", From 5d572aec98d00d406bbd0e0fff5e41c5d245cc66 Mon Sep 17 00:00:00 2001 From: kooomix Date: Sun, 18 Aug 2024 11:15:30 +0300 Subject: [PATCH 194/195] Update "Data Collection" step name to "Execution" in workload-unauthenticated-service.json Signed-off-by: kooomix --- attack-tracks/workload-unauthenticated-service.json | 2 +- controls/C-0274-unauthenticatedservice.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/attack-tracks/workload-unauthenticated-service.json b/attack-tracks/workload-unauthenticated-service.json index 62ec29d4f..d3eb6b961 100644 --- a/attack-tracks/workload-unauthenticated-service.json +++ b/attack-tracks/workload-unauthenticated-service.json @@ -11,7 +11,7 @@ "description": "The service is exposed outside the Kubernetes network.", "subSteps": [ { - "name": "Data Collection", + "name": "Execution", "description": "Database access is missing authentication and it can be accessed by anyone" } ] diff --git a/controls/C-0274-unauthenticatedservice.json b/controls/C-0274-unauthenticatedservice.json index d199fb929..0b208afe8 100644 --- a/controls/C-0274-unauthenticatedservice.json +++ b/controls/C-0274-unauthenticatedservice.json @@ -13,7 +13,7 @@ { "attackTrack": "workload-unauthenticated-service", "categories": [ - "Data Collection" + "Execution" ] } ] From 
455425469b3c28d7c002c50187cb8375135c33a3 Mon Sep 17 00:00:00 2001 From: kooomix Date: Tue, 20 Aug 2024 10:57:17 +0300 Subject: [PATCH 195/195] Update relatedObjects in unauthenticated-service/raw.rego Signed-off-by: kooomix --- rules/unauthenticated-service/raw.rego | 5 ---- .../test/fail_service/expected.json | 29 +------------------ 2 files changed, 1 insertion(+), 33 deletions(-) diff --git a/rules/unauthenticated-service/raw.rego b/rules/unauthenticated-service/raw.rego index 6e311218c..4ba95ab14 100644 --- a/rules/unauthenticated-service/raw.rego +++ b/rules/unauthenticated-service/raw.rego @@ -25,11 +25,6 @@ deny contains msga if { "failedPaths": [], "packagename": "armo_builtins", "alertObject": {"k8sApiObjects": [wl]}, - "relatedObjects": [ - {"object": service, - "reviewPaths": ["spec"], - }, - ], } } diff --git a/rules/unauthenticated-service/test/fail_service/expected.json b/rules/unauthenticated-service/test/fail_service/expected.json index 3d733686b..b4e3533a3 100644 --- a/rules/unauthenticated-service/test/fail_service/expected.json +++ b/rules/unauthenticated-service/test/fail_service/expected.json @@ -20,34 +20,7 @@ "failedPaths": [], "fixPaths": [], "packagename": "armo_builtins", - "relatedObjects": [ - { - "deletePaths": null, - "failedPaths": null, - "fixPaths": null, - "object": { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "name": "operator", - "namespace": "kubescape" - }, - "spec": { - "ports": [ - { - "port": 4002, - "protocol": "TCP", - "targetPort": 8080 - } - ], - "selector": { - "app": "operator" - } - } - }, - "reviewPaths": ["spec"] - } - ], + "relatedObjects": [], "reviewPaths": [], "ruleStatus": "" }
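
For orientation, below is a minimal sketch of how the unauthenticated-service deny rule reads once the cumulative changes in patches 182-195 above are applied: empty failedPaths, reviewPaths and fixPaths, the workload as the alert object, and no relatedObjects, which is also what the updated fail_service/expected.json now asserts. The hunks above do not show how the rule binds the Service to its workload or how has_unauthenticated_service is implemented, so those parts are simplified assumptions here (a plain label match on the Pod case and a check of the ports[].authenticated flag carried by the kubescape.io/v1 ServiceScanResult test fixtures), not the shipped implementation.

package armo_builtins

import rego.v1

# Sketch only: condensed view of the msga object after patches 182-195.
deny contains msga if {
	service := input[_]
	service.kind == "Service"

	# Simplified workload selection: match the Service selector against Pod labels.
	# The real rule covers more workload kinds and template-level labels.
	wl := input[_]
	wl.kind == "Pod"
	some key, val in service.spec.selector
	wl.metadata.labels[key] == val

	service_scan_result := input[_]
	service_scan_result.apiVersion == "kubescape.io/v1"
	service_scan_result.kind == "ServiceScanResult"

	service_name := service.metadata.name
	has_unauthenticated_service(service_name, service.metadata.namespace, service_scan_result)

	msga := {
		"alertMessage": sprintf("Unauthenticated service %v exposes %v", [service_name, wl.metadata.name]),
		"alertScore": 7,
		"fixPaths": [],
		"reviewPaths": [],
		"failedPaths": [],
		"packagename": "armo_builtins",
		"alertObject": {"k8sApiObjects": [wl]},
	}
}

# Simplified stand-in for the real helper: a service counts as unauthenticated
# when the matching scan result reports a port with authenticated == false,
# as in the fail_service fixtures (the pass fixtures set authenticated: true).
has_unauthenticated_service(name, namespace, scan) if {
	scan.metadata.name == name
	scan.metadata.namespace == namespace
	some port in scan.spec.ports
	port.authenticated == false
}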